Repository: haiwen/seafile-server Branch: master Commit: 377d6e5805ad Files: 284 Total size: 2.8 MB Directory structure: gitextract_lx6ttehu/ ├── .github/ │ └── workflows/ │ ├── ci.yml │ └── golangci-lint.yml ├── .gitignore ├── LICENSE.txt ├── Makefile.am ├── README.markdown ├── README.testing.md ├── autogen.sh ├── ci/ │ ├── install-deps.sh │ ├── requirements.txt │ ├── run.py │ ├── serverctl.py │ └── utils.py ├── common/ │ ├── Makefile.am │ ├── block-backend-fs.c │ ├── block-backend.c │ ├── block-backend.h │ ├── block-mgr.c │ ├── block-mgr.h │ ├── block-tx-utils.c │ ├── block-tx-utils.h │ ├── block.h │ ├── branch-mgr.c │ ├── branch-mgr.h │ ├── cdc/ │ │ ├── Makefile.am │ │ ├── cdc.c │ │ ├── cdc.h │ │ ├── rabin-checksum.c │ │ └── rabin-checksum.h │ ├── commit-mgr.c │ ├── commit-mgr.h │ ├── common.h │ ├── config-mgr.c │ ├── config-mgr.h │ ├── diff-simple.c │ ├── diff-simple.h │ ├── fs-mgr.c │ ├── fs-mgr.h │ ├── group-mgr.c │ ├── group-mgr.h │ ├── log.c │ ├── log.h │ ├── merge-new.c │ ├── merge-new.h │ ├── mq-mgr.c │ ├── mq-mgr.h │ ├── obj-backend-fs.c │ ├── obj-backend-riak.c │ ├── obj-backend.h │ ├── obj-cache.c │ ├── obj-cache.h │ ├── obj-store.c │ ├── obj-store.h │ ├── object-list.c │ ├── object-list.h │ ├── org-mgr.c │ ├── org-mgr.h │ ├── password-hash.c │ ├── password-hash.h │ ├── processors/ │ │ └── objecttx-common.h │ ├── redis-cache.c │ ├── redis-cache.h │ ├── rpc-service.c │ ├── seaf-db.c │ ├── seaf-db.h │ ├── seaf-utils.c │ ├── seaf-utils.h │ ├── seafile-crypt.c │ ├── seafile-crypt.h │ ├── sync-repo-common.h │ ├── user-mgr.c │ ├── user-mgr.h │ ├── vc-common.c │ └── vc-common.h ├── configure.ac ├── controller/ │ ├── Makefile.am │ ├── seafile-controller.c │ └── seafile-controller.h ├── doc/ │ └── Makefile.am ├── fileserver/ │ ├── .golangci.yml │ ├── blockmgr/ │ │ ├── blockmgr.go │ │ └── blockmgr_test.go │ ├── commitmgr/ │ │ ├── commitmgr.go │ │ ├── commitmgr_test.go │ │ └── null.go │ ├── crypt.go │ ├── diff/ │ │ ├── diff.go │ │ └── diff_test.go │ ├── fileop.go 
│ ├── fileserver.go │ ├── fsmgr/ │ │ ├── fsmgr.go │ │ └── fsmgr_test.go │ ├── go.mod │ ├── go.sum │ ├── http_code.go │ ├── merge.go │ ├── merge_test.go │ ├── metrics/ │ │ └── metrics.go │ ├── objstore/ │ │ ├── backend_fs.go │ │ ├── objstore.go │ │ └── objstore_test.go │ ├── option/ │ │ └── option.go │ ├── quota.go │ ├── repomgr/ │ │ ├── repomgr.go │ │ └── repomgr_test.go │ ├── searpc/ │ │ ├── searpc.go │ │ └── searpc_test.go │ ├── share/ │ │ ├── group/ │ │ │ └── group.go │ │ ├── public/ │ │ │ └── public.go │ │ └── share.go │ ├── size_sched.go │ ├── sync_api.go │ ├── utils/ │ │ ├── dup2.go │ │ ├── dup3.go │ │ ├── http.go │ │ └── utils.go │ ├── virtual_repo.go │ └── workerpool/ │ └── workerpool.go ├── fuse/ │ ├── Makefile.am │ ├── file.c │ ├── getattr.c │ ├── readdir.c │ ├── repo-mgr.c │ ├── repo-mgr.h │ ├── seaf-fuse.c │ ├── seaf-fuse.h │ ├── seafile-session.c │ └── seafile-session.h ├── include/ │ ├── Makefile.am │ ├── seafile-error.h │ └── seafile-rpc.h ├── lib/ │ ├── Makefile.am │ ├── bloom-filter.c │ ├── bloom-filter.h │ ├── branch.vala │ ├── ccnetobj.vala │ ├── commit.vala │ ├── copy-task.vala │ ├── crypt.vala │ ├── db.c │ ├── db.h │ ├── dir.vala │ ├── dirent.vala │ ├── file.vala │ ├── include.h │ ├── job-mgr.c │ ├── job-mgr.h │ ├── libseafile.pc.in │ ├── net.c │ ├── net.h │ ├── repo.vala │ ├── rpc_table.py │ ├── seahub.vala │ ├── search-result.vala │ ├── task.vala │ ├── timer.c │ ├── timer.h │ ├── utils.c │ ├── utils.h │ └── webaccess.vala ├── m4/ │ ├── ax_lib_sqlite3.m4 │ ├── glib-gettext.m4 │ └── python.m4 ├── notification-server/ │ ├── .golangci.yml │ ├── ccnet.conf │ ├── client.go │ ├── dup2.go │ ├── dup3.go │ ├── event.go │ ├── go.mod │ ├── go.sum │ ├── logger.go │ ├── server.go │ └── subscriptions.go ├── pytest.ini ├── python/ │ ├── LICENSE.txt │ ├── Makefile.am │ ├── seafile/ │ │ ├── Makefile.am │ │ ├── __init__.py │ │ └── rpcclient.py │ └── seaserv/ │ ├── Makefile.am │ ├── __init__.py │ ├── api.py │ └── service.py ├── run_tests.sh ├── scripts/ │ ├── 
Makefile.am │ ├── parse_seahub_db.py │ └── sql/ │ ├── mysql/ │ │ ├── ccnet.sql │ │ └── seafile.sql │ └── sqlite/ │ ├── config.sql │ ├── groupmgr.sql │ ├── org.sql │ ├── seafile.sql │ └── user.sql ├── server/ │ ├── Makefile.am │ ├── access-file.c │ ├── access-file.h │ ├── change-set.c │ ├── change-set.h │ ├── copy-mgr.c │ ├── copy-mgr.h │ ├── fileserver-config.c │ ├── fileserver-config.h │ ├── gc/ │ │ ├── Makefile.am │ │ ├── fsck.c │ │ ├── fsck.h │ │ ├── gc-core.c │ │ ├── gc-core.h │ │ ├── repo-mgr.c │ │ ├── repo-mgr.h │ │ ├── seaf-fsck.c │ │ ├── seafile-session.c │ │ ├── seafile-session.h │ │ ├── seafserv-gc.c │ │ ├── verify.c │ │ └── verify.h │ ├── http-server.c │ ├── http-server.h │ ├── http-status-codes.h │ ├── http-tx-mgr.c │ ├── http-tx-mgr.h │ ├── index-blocks-mgr.c │ ├── index-blocks-mgr.h │ ├── metric-mgr.c │ ├── metric-mgr.h │ ├── notif-mgr.c │ ├── notif-mgr.h │ ├── pack-dir.c │ ├── pack-dir.h │ ├── passwd-mgr.c │ ├── passwd-mgr.h │ ├── permission-mgr.c │ ├── permission-mgr.h │ ├── quota-mgr.c │ ├── quota-mgr.h │ ├── repo-mgr.c │ ├── repo-mgr.h │ ├── repo-op.c │ ├── repo-perm.c │ ├── seaf-server.c │ ├── seafile-session.c │ ├── seafile-session.h │ ├── share-mgr.c │ ├── share-mgr.h │ ├── size-sched.c │ ├── size-sched.h │ ├── upload-file.c │ ├── upload-file.h │ ├── virtual-repo.c │ ├── web-accesstoken-mgr.c │ ├── web-accesstoken-mgr.h │ ├── zip-download-mgr.c │ └── zip-download-mgr.h ├── tests/ │ ├── __init__.py │ ├── conf/ │ │ ├── ccnet.conf │ │ └── mykey.peer │ ├── config.py │ ├── conftest.py │ ├── test_file_operation/ │ │ ├── test_file_operation.py │ │ ├── test_merge_virtual_repo.py │ │ ├── test_search_files.py │ │ ├── test_upload_and_update.py │ │ ├── test_upload_large_files.py │ │ └── test_zip_download.py │ ├── test_file_property_and_dir_listing/ │ │ └── test_file_property_and_dir_listing.py │ ├── test_gc/ │ │ └── test_gc.py │ ├── test_get_repo_list/ │ │ └── test_get_repo_list.py │ ├── test_group/ │ │ └── test_groups.py │ ├── test_password/ │ │ └── 
test_password.py │ ├── test_repo_manipulation/ │ │ └── test_repo_manipulation.py │ ├── test_server_config/ │ │ └── test_server_config.py │ ├── test_share_and_perm/ │ │ ├── test_shared_repo_perm.py │ │ └── test_structure_repo_perm.py │ ├── test_trashed_repos/ │ │ └── test_trashed_repos.py │ ├── test_upload/ │ │ ├── account.conf │ │ ├── go.mod │ │ ├── go.sum │ │ ├── readme.md │ │ └── test_upload.go │ ├── test_user/ │ │ └── test_users.py │ └── utils.py ├── tools/ │ ├── Makefile.am │ └── seafile-admin └── updateversion.sh ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/workflows/ci.yml ================================================ name: Seafile CI on: [push, pull_request] jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v1 with: fetch-depth: 1 - uses: actions/setup-python@v3 with: python-version: "3.12" - name: install dependencies and test run: | cd $GITHUB_WORKSPACE ./ci/install-deps.sh ./ci/run.py ================================================ FILE: .github/workflows/golangci-lint.yml ================================================ name: golangci-lint on: [push, pull_request] permissions: contents: read # Optional: allow read access to pull request. Use with `only-new-issues` option. 
# pull-requests: read jobs: golangci-fileserver: name: lint runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: go-version: "1.22" - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: version: v1.59 working-directory: ./fileserver args: --timeout=5m golangci-notification-server: name: lint runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: go-version: "1.22" - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: version: v1.59 working-directory: ./notification-server args: --timeout=5m ================================================ FILE: .gitignore ================================================ *~ *.bak *.o *.exe cscope* *# Makefile.in ltmain.sh libtool *.lo *.la install-sh depcomp config.guess config.h config.log config.status config.sub config.cache configure */.deps autom4te* po/POTFILES po/Makefile* po/stamp-it po/*.gmo po/*.pot missing mkinstalldirs stamp-h1 *.libs/ Makefile aclocal.m4 *core m4/intltool.m4 m4/libtool.m4 m4/ltoptions.m4 m4/ltsugar.m4 m4/ltversion.m4 m4/lt~obsolete.m4 ccnet-*.tar.gz config.h.in py-compile intltool-extract.in intltool-merge.in intltool-update.in *.stamp *.pyc *.tmp.ui *.defs *.log .deps *.db *.dll *.aps *.so build-stamp debian/files debian/seafile debian/*.substvars lib/searpc-marshal.h lib/searpc-signature.h lib/*.tmp lib/dir.c lib/dirent.c lib/seafile-object.h lib/task.c lib/webaccess.c lib/branch.c lib/commit.c lib/crypt.c lib/repo.c lib/copy-task.c lib/search-result.c seaf-server seafserv-gc seaf-migrate seaf-fsck seaf-fuse controller/seafile-controller tools/seaf-server-init tests/conf/misc/ tests/conf/seafile-data/ tests/conf/ccnet.db tests/conf/ccnet.sock tests/conf/GroupMgr tests/conf/OrgMgr tests/conf/PeerMgr *.dylib .DS_Store *.pc *.tar.gz /compile /test-driver *.dmp /symbols __pycache__/ .cache/ ================================================ FILE: LICENSE.txt ================================================ 
This program is released under Affero GPLv3, with the following additional permission to link with OpenSSL library. If you modify this program, or any covered work, by linking or combining it with the OpenSSL project's OpenSSL library (or a modified version of that library), containing parts covered by the terms of the OpenSSL or SSLeay licenses, Seafile Ltd. grants you additional permission to convey the resulting work. Corresponding Source for a non-source form of such a combination shall include the source code for the parts of OpenSSL used as well as that of the covered work. The source code files under 'python' directory is released under Apache License v2.0. You can find Apache License 2.0 file in that directory. GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 Copyright © 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. 
Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software. A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public. The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version. An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU Affero General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". 
"Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. 
The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. 
The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. 
You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. 
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. 
d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. 
A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. 
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Remote Network Interaction; Use with the GNU General Public License. Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. 
Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see <https://www.gnu.org/licenses/>. 
================================================ FILE: Makefile.am ================================================ MAKE_CLIENT = if WIN32 MAKE_CONTROLLER = else MAKE_CONTROLLER = controller endif if COMPILE_FUSE MAKE_FUSE = fuse else MAKE_FUSE = endif MAKE_SERVER = server tools $(MAKE_CONTROLLER) $(MAKE_FUSE) SUBDIRS = include lib common python $(MAKE_SERVER) doc scripts DIST_SUBDIRS = include lib common python server tools controller fuse doc scripts INTLTOOL = \ intltool-extract.in \ intltool-merge.in \ intltool-update.in EXTRA_DIST = install-sh $(INTLTOOL) README.markdown scripts LICENSE.txt ACLOCAL_AMFLAGS = -I m4 dist-hook: git log --format='%H' -1 > $(distdir)/latest_commit ================================================ FILE: README.markdown ================================================ Seafile Server Core [![Build Status](https://secure.travis-ci.org/haiwen/seafile-server.svg?branch=master)](http://travis-ci.org/haiwen/seafile-server) ============ Seafile is an open source cloud storage system with features on privacy protection and teamwork. Collections of files are called libraries, and each library can be synced separately. A library can also be encrypted with a user chosen password. Seafile also allows users to create groups and easily sharing files into groups. This is the core component of Seafile server. It provides RPC to the web front-end (Seahub) to access files, and provides HTTP APIs to the desktop clients for syncing files. Build and Run ============= See Contributing =========== For more informations read [Contribution](https://manual.seafile.com/contribution/). License ======= The Seafile server core is published under AGPLv3. Other components of Seafile have different licenses. Please refer to the coresponding projects. 
Contact ======= Twitter: @seafile Forum: ================================================ FILE: README.testing.md ================================================ # Seafile Server Tests ## Run it locally To run the tests, you need to install pytest first: ```sh pip install -r ci/requirements.txt ``` Compile and install ccnet-server and seafile-server ``` cd ccnet-server make sudo make install cd seafile-server make sudo make install ``` Then run the tests with ```sh cd seafile-server ./run_tests.sh ``` By default the test script would try to start ccnet-server and seaf-server in `/usr/local/bin`, if you `make install` to another location, say `/opt/local`, run it like this: ```sh SEAFILE_INSTALL_PREFIX=/opt/local ./run_tests.sh ``` ================================================ FILE: autogen.sh ================================================ #!/bin/bash # Run this to generate all the initial makefiles, etc. : ${AUTOCONF=autoconf} : ${AUTOHEADER=autoheader} : ${AUTOMAKE=automake} : ${ACLOCAL=aclocal} if test "$(uname)" != "Darwin"; then : ${LIBTOOLIZE=libtoolize} else : ${LIBTOOLIZE=glibtoolize} fi : ${INTLTOOLIZE=intltoolize} : ${LIBTOOL=libtool} srcdir=`dirname $0` test -z "$srcdir" && srcdir=. ORIGDIR=`pwd` cd $srcdir PROJECT=ccnet TEST_TYPE=-f FILE=net/main.c CONFIGURE=configure.ac DIE=0 ($AUTOCONF --version) < /dev/null > /dev/null 2>&1 || { echo echo "You must have autoconf installed to compile $PROJECT." echo "Download the appropriate package for your distribution," echo "or get the source tarball at ftp://ftp.gnu.org/pub/gnu/" DIE=1 } (grep "^AC_PROG_INTLTOOL" $srcdir/$CONFIGURE >/dev/null) && { ($INTLTOOLIZE --version) < /dev/null > /dev/null 2>&1 || { echo echo "You must have \`intltoolize' installed to compile $PROJECT." 
echo "Get ftp://ftp.gnome.org/pub/GNOME/stable/sources/intltool/intltool-0.22.tar.gz" echo "(or a newer version if it is available)" DIE=1 } } ($AUTOMAKE --version) < /dev/null > /dev/null 2>&1 || { echo echo "You must have automake installed to compile $PROJECT." echo "Get ftp://sourceware.cygnus.com/pub/automake/automake-1.7.tar.gz" echo "(or a newer version if it is available)" DIE=1 } if test "$(uname)" != "Darwin"; then (grep "^AC_PROG_LIBTOOL" $CONFIGURE >/dev/null) && { ($LIBTOOL --version) < /dev/null > /dev/null 2>&1 || { echo echo "**Error**: You must have \`libtool' installed to compile $PROJECT." echo "Get ftp://ftp.gnu.org/pub/gnu/libtool-1.4.tar.gz" echo "(or a newer version if it is available)" DIE=1 } } fi if grep "^AM_[A-Z0-9_]\{1,\}_GETTEXT" "$CONFIGURE" >/dev/null; then if grep "sed.*POTFILES" "$CONFIGURE" >/dev/null; then GETTEXTIZE="" else if grep "^AM_GLIB_GNU_GETTEXT" "$CONFIGURE" >/dev/null; then GETTEXTIZE="glib-gettextize" GETTEXTIZE_URL="ftp://ftp.gtk.org/pub/gtk/v2.0/glib-2.0.0.tar.gz" else GETTEXTIZE="gettextize" GETTEXTIZE_URL="ftp://alpha.gnu.org/gnu/gettext-0.10.35.tar.gz" fi $GETTEXTIZE --version < /dev/null > /dev/null 2>&1 if test $? -ne 0; then echo echo "**Error**: You must have \`$GETTEXTIZE' installed to compile $PKG_NAME." echo "Get $GETTEXTIZE_URL" echo "(or a newer version if it is available)" DIE=1 fi fi fi if test "$DIE" -eq 1; then exit 1 fi dr=`dirname .` echo processing $dr aclocalinclude="$aclocalinclude -I m4" if test x"$MSYSTEM" = x"MINGW32"; then aclocalinclude="$aclocalinclude -I /mingw32/share/aclocal" elif test "$(uname)" = "Darwin"; then aclocalinclude="$aclocalinclude -I /opt/local/share/aclocal" fi echo "Creating $dr/aclocal.m4 ..." test -r $dr/aclocal.m4 || touch $dr/aclocal.m4 echo "Running glib-gettextize... Ignore non-fatal messages." echo "no" | glib-gettextize --force --copy echo "Making $dr/aclocal.m4 writable ..." test -r $dr/aclocal.m4 && chmod u+w $dr/aclocal.m4 echo "Running intltoolize..." 
intltoolize --copy --force --automake echo "Running $LIBTOOLIZE..." $LIBTOOLIZE --force --copy echo "Running $ACLOCAL $aclocalinclude ..." $ACLOCAL $aclocalinclude echo "Running $AUTOHEADER..." $AUTOHEADER echo "Running $AUTOMAKE --gnu $am_opt ..." $AUTOMAKE --add-missing --gnu $am_opt echo "Running $AUTOCONF ..." $AUTOCONF ================================================ FILE: ci/install-deps.sh ================================================ #!/bin/bash set -e -x SCRIPT=${BASH_SOURCE[0]} TESTS_DIR=$(dirname "${SCRIPT}")/.. SETUP_DIR=${TESTS_DIR}/ci cd $SETUP_DIR sudo systemctl start mysql.service sudo apt-get update --fix-missing sudo apt-get install -y intltool libarchive-dev libcurl4-openssl-dev libevent-dev \ libfuse-dev libglib2.0-dev libjansson-dev libmysqlclient-dev libonig-dev \ sqlite3 libsqlite3-dev libtool net-tools uuid-dev valac libargon2-dev sudo systemctl start mysql.service pip install -r requirements.txt ================================================ FILE: ci/requirements.txt ================================================ termcolor>=1.1.0 requests>=2.8.0 pytest>=3.3.2 backports.functools_lru_cache>=1.4 tenacity>=4.8.0 future requests-toolbelt ================================================ FILE: ci/run.py ================================================ #!/usr/bin/env python """ Install dir: ~/opt/local Data dir: /tmp/haiwen """ import argparse import glob import json import logging import os import re import sys from os.path import abspath, basename, exists, expanduser, join import requests import termcolor import site from serverctl import ServerCtl from utils import ( cd, chdir, debug, green, info, lru_cache, mkdirs, on_github_actions, red, setup_logging, shell, warning ) logger = logging.getLogger(__name__) TOPDIR = abspath(join(os.getcwd(), '..')) if on_github_actions(): PREFIX = expanduser('~/opt/local') else: PREFIX = os.environ.get('SEAFILE_INSTALL_PREFIX', '/usr/local') INSTALLDIR = '/tmp/seafile-tests' def num_jobs(): return 
int(os.environ.get('NUM_JOBS', 2)) @lru_cache() def make_build_env(): env = dict(os.environ) libsearpc_dir = abspath(join(TOPDIR, 'libsearpc')) ccnet_dir = abspath(join(TOPDIR, 'ccnet-server')) def _env_add(*a, **kw): kw['env'] = env return prepend_env_value(*a, **kw) _env_add('CPPFLAGS', '-I%s' % join(PREFIX, 'include'), seperator=' ') _env_add('LDFLAGS', '-L%s' % join(PREFIX, 'lib'), seperator=' ') _env_add('LDFLAGS', '-L%s' % join(PREFIX, 'lib64'), seperator=' ') _env_add('PATH', join(PREFIX, 'bin')) py_version = '.'.join(map(str, sys.version_info[:3])) if on_github_actions(): _env_add('PYTHONPATH', join(os.environ.get('RUNNER_TOOL_CACHE'), 'Python/{py_version}/x64/lib/python3.12/site-packages')) _env_add('PYTHONPATH', join(PREFIX, 'lib/python3.12/site-packages')) _env_add('PKG_CONFIG_PATH', join(PREFIX, 'lib', 'pkgconfig')) _env_add('PKG_CONFIG_PATH', join(PREFIX, 'lib64', 'pkgconfig')) _env_add('PKG_CONFIG_PATH', libsearpc_dir) _env_add('PKG_CONFIG_PATH', ccnet_dir) _env_add('LD_LIBRARY_PATH', join(PREFIX, 'lib')) _env_add('JWT_PRIVATE_KEY', '@%ukmcl$k=9u-grs4azdljk(sn0kd!=mzc17xd7x8#!u$1x@kl') _env_add('SEAFILE_MYSQL_DB_CCNET_DB_NAME', 'ccnet') # Prepend the seafile-server/python to PYTHONPATH so we don't need to "make # install" each time after editing python files. 
_env_add('PYTHONPATH', join(SeafileServer().projectdir, 'python')) for key in ('PATH', 'PKG_CONFIG_PATH', 'CPPFLAGS', 'LDFLAGS', 'PYTHONPATH'): info('%s: %s', key, env.get(key, '')) return env def prepend_env_value(name, value, seperator=':', env=None): '''append a new value to a list''' env = env or os.environ current_value = env.get(name, '') new_value = value if current_value: new_value += seperator + current_value env[name] = new_value return env @lru_cache() def get_branch_json_file(): url = 'https://raw.githubusercontent.com/haiwen/seafile-test-deploy/master/branches.json' return requests.get(url).json() def get_project_branch(project, default_branch='master'): travis_branch = os.environ.get('TRAVIS_BRANCH', 'master') if project.name == 'seafile-server': return travis_branch conf = get_branch_json_file() return conf.get(travis_branch, {}).get(project.name, default_branch) class Project(object): def __init__(self, name): self.name = name self.version = '' @property def url(self): return 'https://www.github.com/haiwen/{}.git'.format(self.name) @property def projectdir(self): return join(TOPDIR, self.name) def branch(self): return get_project_branch(self) def clone(self): if exists(self.name): with cd(self.name): shell('git fetch origin --tags') else: shell( 'git clone --depth=1 --branch {} {}'. 
format(self.branch(), self.url) ) @chdir def compile_and_install(self): cmds = [ './autogen.sh', './configure --prefix={}'.format(PREFIX), 'make -j{} V=0'.format(num_jobs()), 'make install', ] for cmd in cmds: shell(cmd) @chdir def use_branch(self, branch): shell('git checkout {}'.format(branch)) class Libsearpc(Project): def __init__(self): super(Libsearpc, self).__init__('libsearpc') def branch(self): return 'master' class CcnetServer(Project): def __init__(self): super(CcnetServer, self).__init__('ccnet-server') def branch(self): return '7.1' class SeafileServer(Project): def __init__(self): super(SeafileServer, self).__init__('seafile-server') class Libevhtp(Project): def __init__(self): super(Libevhtp, self).__init__('libevhtp') def branch(self): return 'master' @chdir def compile_and_install(self): cmds = [ 'cmake -DEVHTP_DISABLE_SSL=ON -DEVHTP_BUILD_SHARED=OFF -DCMAKE_POLICY_VERSION_MINIMUM=3.5 .', 'make', 'sudo make install', 'sudo ldconfig', ] for cmd in cmds: shell(cmd) class Libjwt(Project): def __init__(self): super(Libjwt, self).__init__('libjwt') def branch(self): return 'v1.13.1' @property def url(self): return 'https://www.github.com/benmcollins/libjwt.git' @chdir def compile_and_install(self): cmds = [ 'autoreconf -i', './configure', 'sudo make all', 'sudo make install', ] for cmd in cmds: shell(cmd) class Libhiredis(Project): def __init__(self): super(Libhiredis, self).__init__('hiredis') def branch(self): return 'v1.1.0' @property def url(self): return 'https://github.com/redis/hiredis.git' @chdir def compile_and_install(self): cmds = [ 'sudo make', 'sudo make install', ] for cmd in cmds: shell(cmd) def fetch_and_build(): libsearpc = Libsearpc() libjwt = Libjwt() libhiredis = Libhiredis() libevhtp = Libevhtp() ccnet = CcnetServer() seafile = SeafileServer() libsearpc.clone() libjwt.clone() libhiredis.clone() libevhtp.clone() ccnet.clone() libsearpc.compile_and_install() libjwt.compile_and_install() libhiredis.compile_and_install() 
libevhtp.compile_and_install() seafile.compile_and_install() def parse_args(): ap = argparse.ArgumentParser() ap.add_argument('-v', '--verbose', action='store_true') ap.add_argument('-t', '--test-only', action='store_true') return ap.parse_args() def main(): mkdirs(INSTALLDIR) os.environ.update(make_build_env()) args = parse_args() if on_github_actions() and not args.test_only: fetch_and_build() dbs = ('mysql',) for db in dbs: start_and_test_with_db(db) def start_and_test_with_db(db): if db == 'sqlite3': fileservers = ('c_fileserver',) else: fileservers = ('go_fileserver', 'c_fileserver') for fileserver in fileservers: shell('rm -rf {}/*'.format(INSTALLDIR)) info('Setting up seafile server with %s database, use %s', db, fileserver) server = ServerCtl( TOPDIR, SeafileServer().projectdir, INSTALLDIR, fileserver, db=db, # Use the newly built seaf-server (to avoid "make install" each time when developping locally) seaf_server_bin=join(SeafileServer().projectdir, 'server/seaf-server') ) server.setup() with server.run(): info('Testing with %s database', db) with cd(SeafileServer().projectdir): shell('py.test', env=server.get_seaserv_envs()) if __name__ == '__main__': os.chdir(TOPDIR) setup_logging() main() ================================================ FILE: ci/serverctl.py ================================================ #!/usr/bin/env python #coding: UTF-8 import argparse import glob import logging import os import re import sys from collections import namedtuple from contextlib import contextmanager from os.path import abspath, basename, dirname, exists, join import requests from tenacity import TryAgain, retry, stop_after_attempt, wait_fixed from utils import ( cd, chdir, debug, green, info, mkdirs, red, setup_logging, shell, warning ) logger = logging.getLogger(__name__) class ServerCtl(object): def __init__(self, topdir, projectdir, datadir, fileserver, db='sqlite3', seaf_server_bin='seaf-server', ccnet_server_bin='ccnet-server'): self.db = db self.topdir = 
topdir self.datadir = datadir self.central_conf_dir = join(datadir, 'conf') self.seafile_conf_dir = join(datadir, 'seafile-data') self.ccnet_conf_dir = join(datadir, 'ccnet') self.log_dir = join(datadir, 'logs') mkdirs(self.log_dir) self.ccnet_log = join(self.log_dir, 'ccnet.log') self.seafile_log = join(self.log_dir, 'seafile.log') self.fileserver_log = join(self.log_dir, 'fileserver.log') self.ccnet_server_bin = ccnet_server_bin self.seaf_server_bin = seaf_server_bin self.sql_dir = join(topdir, 'seafile-server', 'scripts', 'sql') self.ccnet_proc = None self.seafile_proc = None self.fileserver_proc = None self.projectdir = projectdir self.fileserver = fileserver def setup(self): if self.db == 'mysql': create_mysql_dbs() os.mkdir (self.central_conf_dir, 0o755) os.mkdir (self.seafile_conf_dir, 0o755) os.mkdir (self.ccnet_conf_dir, 0o755) self.init_seafile() def init_seafile(self): seafile_conf = join(self.central_conf_dir, 'seafile.conf') if self.fileserver == 'go_fileserver': seafile_fileserver_conf = '''\ [fileserver] use_go_fileserver = true port=8082 ''' else: seafile_fileserver_conf = '''\ [fileserver] port=8082 ''' with open(seafile_conf, 'a+') as fp: fp.write('\n') fp.write(seafile_fileserver_conf) if self.db == 'mysql': self.add_seafile_db_conf() else: self.add_seafile_sqlite_db_conf() def add_seafile_sqlite_db_conf(self): seafile_conf = join(self.central_conf_dir, 'seafile.conf') seafile_db_conf = '''\ [database] ''' with open(seafile_conf, 'a+') as fp: fp.write('\n') fp.write(seafile_db_conf) def add_seafile_db_conf(self): seafile_conf = join(self.central_conf_dir, 'seafile.conf') seafile_db_conf = '''\ [database] type = mysql host = 127.0.0.1 port = 3306 user = seafile password = seafile db_name = seafile connection_charset = utf8 ''' with open(seafile_conf, 'a+') as fp: fp.write('\n') fp.write(seafile_db_conf) @contextmanager def run(self): try: self.start() yield self except: self.print_logs() raise finally: self.stop() def print_logs(self): for logfile 
in self.ccnet_log, self.seafile_log: if exists(logfile): shell(f'cat {logfile}') @retry(wait=wait_fixed(1), stop=stop_after_attempt(10)) def wait_ccnet_ready(self): if not exists(join(self.ccnet_conf_dir, 'ccnet-rpc.sock')): raise TryAgain def start(self): logger.info('Starting to create ccnet and seafile db tables') self.create_database_tables() logger.info('Starting seafile server') self.start_seafile() self.start_fileserver() def create_database_tables(self): if self.db == 'mysql': ccnet_sql_path = join(self.sql_dir, 'mysql', 'ccnet.sql') seafile_sql_path = join(self.sql_dir, 'mysql', 'seafile.sql') sql = f'USE ccnet; source {ccnet_sql_path}; USE seafile; source {seafile_sql_path};'.encode() shell('sudo mysql -u root -proot', inputdata=sql, wait=False) else: config_sql_path = join(self.sql_dir, 'sqlite', 'config.sql') groupmgr_sql_path = join(self.sql_dir, 'sqlite', 'groupmgr.sql') org_sql_path = join(self.sql_dir, 'sqlite', 'org.sql') user_sql_path = join(self.sql_dir, 'sqlite', 'user.sql') seafile_sql_path = join(self.sql_dir, 'sqlite', 'seafile.sql') misc_dir = join(self.ccnet_conf_dir, 'misc') os.mkdir (misc_dir, 0o755) groupmgr_dir = join(self.ccnet_conf_dir, 'GroupMgr') os.mkdir (groupmgr_dir, 0o755) orgmgr_dir = join(self.ccnet_conf_dir, 'OrgMgr') os.mkdir (orgmgr_dir, 0o755) usermgr_dir = join(self.ccnet_conf_dir, 'PeerMgr') os.mkdir (usermgr_dir, 0o755) config_db_path = join(misc_dir, 'config.db') groupmgr_db_path = join(groupmgr_dir, 'groupmgr.db') orgmgr_db_path = join(orgmgr_dir, 'orgmgr.db') usermgr_db_path = join(usermgr_dir, 'usermgr.db') seafile_db_path = join(self.seafile_conf_dir, 'seafile.db') sql = f'.read {config_sql_path}'.encode() shell('sqlite3 ' + config_db_path, inputdata=sql, wait=False) sql = f'.read {groupmgr_sql_path}'.encode() shell('sqlite3 ' + groupmgr_db_path, inputdata=sql, wait=False) sql = f'.read {org_sql_path}'.encode() shell('sqlite3 ' + orgmgr_db_path, inputdata=sql, wait=False) sql = f'.read {user_sql_path}'.encode() 
shell('sqlite3 ' + usermgr_db_path, inputdata=sql, wait=False) sql = f'.read {seafile_sql_path}'.encode() shell('sqlite3 ' + seafile_db_path, inputdata=sql, wait=False) def start_ccnet(self): cmd = [ self.ccnet_server_bin, "-F", self.central_conf_dir, "-c", self.ccnet_conf_dir, "-f", self.ccnet_log, ] self.ccnet_proc = shell(cmd, wait=False) def start_seafile(self): cmd = [ self.seaf_server_bin, "-F", self.central_conf_dir, "-c", self.ccnet_conf_dir, "-d", self.seafile_conf_dir, "-l", self.seafile_log, "-f", ] self.seafile_proc = shell(cmd, wait=False) def start_fileserver(self): cmd = [ "./fileserver", "-F", self.central_conf_dir, "-d", self.seafile_conf_dir, "-l", self.fileserver_log, ] fileserver_path = join(self.projectdir, 'fileserver') with cd(fileserver_path): shell("go build") self.fileserver_proc = shell(cmd, wait=False) def stop(self): if self.ccnet_proc: logger.info('Stopping ccnet server') self.ccnet_proc.kill() if self.seafile_proc: logger.info('Stopping seafile server') self.seafile_proc.kill() if self.fileserver_proc: logger.info('Stopping go fileserver') self.fileserver_proc.kill() if self.db == 'mysql': del_mysql_dbs() def get_seaserv_envs(self): envs = dict(os.environ) envs.update({ 'SEAFILE_CENTRAL_CONF_DIR': self.central_conf_dir, 'CCNET_CONF_DIR': self.ccnet_conf_dir, 'SEAFILE_CONF_DIR': self.seafile_conf_dir, 'SEAFILE_MYSQL_DB_CCNET_DB_NAME': 'ccnet', }) return envs def create_mysql_dbs(): sql = b'''\ create database `ccnet` character set = 'utf8'; create database `seafile` character set = 'utf8'; create user 'seafile'@'localhost' identified by 'seafile'; GRANT ALL PRIVILEGES ON `ccnet`.* to `seafile`@localhost; GRANT ALL PRIVILEGES ON `seafile`.* to `seafile`@localhost; ''' shell('sudo mysql -u root -proot', inputdata=sql) def del_mysql_dbs(): sql = b'''\ drop database `ccnet`; drop database `seafile`; drop user 'seafile'@'localhost'; ''' shell('sudo mysql -u root -proot', inputdata=sql) ================================================ FILE: 
ci/utils.py ================================================ #coding: UTF-8 import logging import os import re import sys from contextlib import contextmanager from os.path import abspath, basename, exists, expanduser, join from subprocess import PIPE, CalledProcessError, Popen import requests import termcolor try: from functools import lru_cache except ImportError: from backports.functools_lru_cache import lru_cache logger = logging.getLogger(__name__) def _color(s, color): return s if not os.isatty(sys.stdout.fileno()) \ else termcolor.colored(str(s), color) def green(s): return _color(s, 'green') def red(s): return _color(s, 'red') def debug(fmt, *a): logger.debug(green(fmt), *a) def info(fmt, *a): logger.info(green(fmt), *a) def warning(fmt, *a): logger.warn(red(fmt), *a) def shell(cmd, inputdata=None, wait=True, **kw): info('calling "%s" in %s', cmd, kw.get('cwd', os.getcwd())) kw['shell'] = not isinstance(cmd, list) kw['stdin'] = PIPE if inputdata else None p = Popen(cmd, **kw) if inputdata: p.communicate(inputdata) if wait: p.wait() if p.returncode: raise CalledProcessError(p.returncode, cmd) else: return p @contextmanager def cd(path): olddir = os.getcwd() os.chdir(path) try: yield finally: os.chdir(olddir) def chdir(func): def wrapped(self, *w, **kw): with cd(self.projectdir): return func(self, *w, **kw) return wrapped def setup_logging(): kw = { 'format': '[%(asctime)s][%(module)s]: %(message)s', 'datefmt': '%m/%d/%Y %H:%M:%S', 'level': logging.DEBUG, 'stream': sys.stdout, } logging.basicConfig(**kw) logging.getLogger('requests.packages.urllib3.connectionpool' ).setLevel(logging.WARNING) def mkdirs(*paths): for path in paths: if not exists(path): os.mkdir(path) def on_github_actions(): return 'GITHUB_ACTIONS' in os.environ @contextmanager def cd(path): path = expanduser(path) olddir = os.getcwd() os.chdir(path) try: yield finally: os.chdir(olddir) ================================================ FILE: common/Makefile.am 
/* Open a block for reading or writing.
 *
 * @block_id must be a 40-hex-char SHA-1 string (enforced below).
 * @rw_type is BLOCK_READ or BLOCK_WRITE.
 *
 * Returns a newly allocated BHandle on success, NULL on failure.
 * The caller releases it with the backend's block_handle_free().
 */
static BHandle *
block_backend_fs_open_block (BlockBackend *bend,
                             const char *store_id, int version,
                             const char *block_id, int rw_type)
{
    BHandle *handle;
    int fd = -1;
    char *tmp_file;           /* only assigned on the write path */

    g_return_val_if_fail (block_id != NULL, NULL);
    g_return_val_if_fail (strlen(block_id) == 40, NULL);
    g_return_val_if_fail (rw_type == BLOCK_READ || rw_type == BLOCK_WRITE, NULL);

    if (rw_type == BLOCK_READ) {
        /* Reads open the committed block file directly. */
        char path[SEAF_PATH_MAX];
        get_block_path (bend, block_id, path, store_id, version);
        fd = g_open (path, O_RDONLY | O_BINARY, 0);
        if (fd < 0) {
            ccnet_warning ("[block bend] failed to open block %s for read: %s\n",
                           block_id, strerror(errno));
            return NULL;
        }
    } else {
        /* Writes go to a temporary file first; commit_block() later
         * renames it into place, so readers never see a partial block. */
        fd = open_tmp_file (bend, block_id, &tmp_file);
        if (fd < 0) {
            ccnet_warning ("[block bend] failed to open block %s for write: %s\n",
                           block_id, strerror(errno));
            return NULL;
        }
    }

    handle = g_new0(BHandle, 1);
    handle->fd = fd;
    memcpy (handle->block_id, block_id, 41);   /* 40 hex chars + NUL */
    handle->rw_type = rw_type;
    if (rw_type == BLOCK_WRITE)
        handle->tmp_file = tmp_file;
    if (store_id)
        handle->store_id = g_strdup(store_id);
    handle->version = version;

    return handle;
}
*/ g_unlink (handle->tmp_file); g_free (handle->tmp_file); } g_free (handle->store_id); g_free (handle); } static int create_parent_path (const char *path) { char *dir = g_path_get_dirname (path); if (!dir) return -1; if (g_file_test (dir, G_FILE_TEST_EXISTS)) { g_free (dir); return 0; } if (g_mkdir_with_parents (dir, 0777) < 0) { seaf_warning ("Failed to create object parent path: %s.\n", dir); g_free (dir); return -1; } g_free (dir); return 0; } static int block_backend_fs_commit_block (BlockBackend *bend, BHandle *handle) { char path[SEAF_PATH_MAX]; g_return_val_if_fail (handle->rw_type == BLOCK_WRITE, -1); get_block_path (bend, handle->block_id, path, handle->store_id, handle->version); if (create_parent_path (path) < 0) { seaf_warning ("Failed to create path for block %s:%s.\n", handle->store_id, handle->block_id); return -1; } if (g_rename (handle->tmp_file, path) < 0) { seaf_warning ("[block bend] failed to commit block %s:%s: %s\n", handle->store_id, handle->block_id, strerror(errno)); return -1; } return 0; } static gboolean block_backend_fs_block_exists (BlockBackend *bend, const char *store_id, int version, const char *block_sha1) { char block_path[SEAF_PATH_MAX]; get_block_path (bend, block_sha1, block_path, store_id, version); if (g_access (block_path, F_OK) == 0) return TRUE; else return FALSE; } static int block_backend_fs_remove_block (BlockBackend *bend, const char *store_id, int version, const char *block_id) { char path[SEAF_PATH_MAX]; get_block_path (bend, block_id, path, store_id, version); return g_unlink (path); } static BMetadata * block_backend_fs_stat_block (BlockBackend *bend, const char *store_id, int version, const char *block_id) { char path[SEAF_PATH_MAX]; SeafStat st; BMetadata *block_md; get_block_path (bend, block_id, path, store_id, version); if (seaf_stat (path, &st) < 0) { seaf_warning ("[block bend] Failed to stat block %s:%s at %s: %s.\n", store_id, block_id, path, strerror(errno)); return NULL; } block_md = g_new0(BMetadata, 
/* Iterate all blocks of a store and call @process for each one.
 *
 * Block files are laid out two levels deep: <block_dir>/<store>/<xx>/<rest>,
 * where <xx> is the first two hex chars of the block ID and <rest> the
 * remaining 38; the full ID is reassembled below from the two names.
 *
 * Iteration stops early when @process returns FALSE.
 * Returns 0 (ret is never set to anything else in this function).
 */
static int
block_backend_fs_foreach_block (BlockBackend *bend,
                                const char *store_id,
                                int version,
                                SeafBlockFunc process,
                                void *user_data)
{
    FsPriv *priv = bend->be_priv;
    char *block_dir = NULL;
    int dir_len;
    GDir *dir1 = NULL, *dir2;
    const char *dname1, *dname2;
    char block_id[128];
    char path[SEAF_PATH_MAX], *pos;
    int ret = 0;

#if defined MIGRATION
    /* NOTE(review): under MIGRATION with version == 0 nothing assigns
     * block_dir here, so the strlen() below would dereference NULL —
     * confirm the intended v0 path against the MIGRATION build. */
    if (version > 0)
        block_dir = g_build_filename (priv->block_dir, store_id, NULL);
#else
    block_dir = g_build_filename (priv->block_dir, store_id, NULL);
#endif
    dir_len = strlen (block_dir);

    dir1 = g_dir_open (block_dir, 0, NULL);
    if (!dir1) {
        /* A missing store directory is treated as "no blocks", not an error. */
        goto out;
    }

    /* Reuse one path buffer: keep the store dir prefix, rewrite the tail. */
    memcpy (path, block_dir, dir_len);
    pos = path + dir_len;

    while ((dname1 = g_dir_read_name(dir1)) != NULL) {
        snprintf (pos, sizeof(path) - dir_len, "/%s", dname1);

        dir2 = g_dir_open (path, 0, NULL);
        if (!dir2) {
            seaf_warning ("Failed to open block dir %s.\n", path);
            continue;
        }

        while ((dname2 = g_dir_read_name(dir2)) != NULL) {
            /* Full block ID = subdir name (2 chars) + file name (38 chars). */
            snprintf (block_id, sizeof(block_id), "%s%s", dname1, dname2);
            if (!process (store_id, version, block_id, user_data)) {
                g_dir_close (dir2);
                goto out;
            }
        }
        g_dir_close (dir2);
    }

out:
    if (dir1)
        g_dir_close (dir1);
    g_free (block_dir);
    return ret;
}
/* Delete every block file belonging to @store_id, then the store's
 * directory tree itself (two levels: <block_dir>/<store>/<xx>/<file>).
 *
 * Returns 0 on success (including when the store directory does not
 * exist), -1 if a subdirectory cannot be opened.
 */
static int
block_backend_fs_remove_store (BlockBackend *bend, const char *store_id)
{
    FsPriv *priv = bend->be_priv;
    char *block_dir = NULL;
    GDir *dir1, *dir2;
    const char *dname1, *dname2;
    char *path1, *path2;

    block_dir = g_build_filename (priv->block_dir, store_id, NULL);

    dir1 = g_dir_open (block_dir, 0, NULL);
    if (!dir1) {
        /* Nothing to remove. */
        g_free (block_dir);
        return 0;
    }

    while ((dname1 = g_dir_read_name(dir1)) != NULL) {
        path1 = g_build_filename (block_dir, dname1, NULL);

        dir2 = g_dir_open (path1, 0, NULL);
        if (!dir2) {
            seaf_warning ("Failed to open block dir %s.\n", path1);
            g_dir_close (dir1);
            g_free (path1);
            g_free (block_dir);
            return -1;
        }

        /* Unlink every block file in this fan-out subdirectory. */
        while ((dname2 = g_dir_read_name(dir2)) != NULL) {
            path2 = g_build_filename (path1, dname2, NULL);
            g_unlink (path2);
            g_free (path2);
        }
        g_dir_close (dir2);

        /* Subdirectory is empty now; remove it. */
        g_rmdir (path1);
        g_free (path1);
    }

    g_dir_close (dir1);
    g_rmdir (block_dir);
    g_free (block_dir);

    return 0;
}
(pos, block_sha1 + 2, 41 - 2); return path; } static int open_tmp_file (BlockBackend *bend, const char *basename, char **path) { FsPriv *priv = bend->be_priv; int fd; *path = g_strdup_printf ("%s/%s.XXXXXX", priv->tmp_dir, basename); fd = g_mkstemp (*path); if (fd < 0) g_free (*path); return fd; } BlockBackend * block_backend_fs_new (const char *seaf_dir, const char *tmp_dir) { BlockBackend *bend; FsPriv *priv; bend = g_new0(BlockBackend, 1); priv = g_new0(FsPriv, 1); bend->be_priv = priv; priv->block_dir = g_build_filename (seaf_dir, "storage", "blocks", NULL); priv->block_dir_len = strlen (priv->block_dir); priv->tmp_dir = g_strdup (tmp_dir); priv->tmp_dir_len = strlen (tmp_dir); if (g_mkdir_with_parents (priv->block_dir, 0777) < 0) { seaf_warning ("Block dir %s does not exist and" " is unable to create\n", priv->block_dir); goto onerror; } if (g_mkdir_with_parents (tmp_dir, 0777) < 0) { seaf_warning ("Blocks tmp dir %s does not exist and" " is unable to create\n", tmp_dir); goto onerror; } bend->open_block = block_backend_fs_open_block; bend->read_block = block_backend_fs_read_block; bend->write_block = block_backend_fs_write_block; bend->commit_block = block_backend_fs_commit_block; bend->close_block = block_backend_fs_close_block; bend->exists = block_backend_fs_block_exists; bend->remove_block = block_backend_fs_remove_block; bend->stat_block = block_backend_fs_stat_block; bend->stat_block_by_handle = block_backend_fs_stat_block_by_handle; bend->block_handle_free = block_backend_fs_block_handle_free; bend->foreach_block = block_backend_fs_foreach_block; bend->remove_store = block_backend_fs_remove_store; bend->copy = block_backend_fs_copy; return bend; onerror: g_free (bend->be_priv); g_free (bend); return NULL; } ================================================ FILE: common/block-backend.c ================================================ #include "common.h" #include "log.h" #include "block-backend.h" extern BlockBackend * block_backend_fs_new (const char 
*block_dir, const char *tmp_dir); BlockBackend* load_filesystem_block_backend(GKeyFile *config) { BlockBackend *bend; char *tmp_dir; char *block_dir; block_dir = g_key_file_get_string (config, "block_backend", "block_dir", NULL); if (!block_dir) { seaf_warning ("Block dir not set in config.\n"); return NULL; } tmp_dir = g_key_file_get_string (config, "block_backend", "tmp_dir", NULL); if (!tmp_dir) { seaf_warning ("Block tmp dir not set in config.\n"); return NULL; } bend = block_backend_fs_new (block_dir, tmp_dir); g_free (block_dir); g_free (tmp_dir); return bend; } BlockBackend* load_block_backend (GKeyFile *config) { char *backend; BlockBackend *bend; backend = g_key_file_get_string (config, "block_backend", "name", NULL); if (!backend) { return NULL; } if (strcmp(backend, "filesystem") == 0) { bend = load_filesystem_block_backend(config); g_free (backend); return bend; } seaf_warning ("Unknown backend\n"); return NULL; } ================================================ FILE: common/block-backend.h ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef BLOCK_BACKEND_H #define BLOCK_BACKEND_H #include "block.h" typedef struct BlockBackend BlockBackend; struct BlockBackend { BHandle* (*open_block) (BlockBackend *bend, const char *store_id, int version, const char *block_id, int rw_type); int (*read_block) (BlockBackend *bend, BHandle *handle, void *buf, int len); int (*write_block) (BlockBackend *bend, BHandle *handle, const void *buf, int len); int (*commit_block) (BlockBackend *bend, BHandle *handle); int (*close_block) (BlockBackend *bend, BHandle *handle); int (*exists) (BlockBackend *bend, const char *store_id, int version, const char *block_id); int (*remove_block) (BlockBackend *bend, const char *store_id, int version, const char *block_id); BMetadata* (*stat_block) (BlockBackend *bend, const char *store_id, int version, const char *block_id); BMetadata* (*stat_block_by_handle) 
(BlockBackend *bend, BHandle *handle); void (*block_handle_free) (BlockBackend *bend, BHandle *handle); int (*foreach_block) (BlockBackend *bend, const char *store_id, int version, SeafBlockFunc process, void *user_data); int (*copy) (BlockBackend *bend, const char *src_store_id, int src_version, const char *dst_store_id, int dst_version, const char *block_id); /* Only valid for version 1 repo. Remove all blocks for the repo. */ int (*remove_store) (BlockBackend *bend, const char *store_id); void* be_priv; /* backend private field */ }; BlockBackend* load_block_backend (GKeyFile *config); #endif ================================================ FILE: common/block-mgr.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include "common.h" #include "seafile-session.h" #include "utils.h" #include "seaf-utils.h" #include "block-mgr.h" #include "log.h" #include #include #include #include #include #include #include #include #include "block-backend.h" #define SEAF_BLOCK_DIR "blocks" extern BlockBackend * block_backend_fs_new (const char *block_dir, const char *tmp_dir); SeafBlockManager * seaf_block_manager_new (struct _SeafileSession *seaf, const char *seaf_dir) { SeafBlockManager *mgr; mgr = g_new0 (SeafBlockManager, 1); mgr->seaf = seaf; mgr->backend = block_backend_fs_new (seaf_dir, seaf->tmp_file_dir); if (!mgr->backend) { seaf_warning ("[Block mgr] Failed to load backend.\n"); goto onerror; } return mgr; onerror: g_free (mgr); return NULL; } int seaf_block_manager_init (SeafBlockManager *mgr) { return 0; } BlockHandle * seaf_block_manager_open_block (SeafBlockManager *mgr, const char *store_id, int version, const char *block_id, int rw_type) { if (!store_id || !is_uuid_valid(store_id) || !block_id || !is_object_id_valid(block_id)) return NULL; return mgr->backend->open_block (mgr->backend, store_id, version, block_id, rw_type); } int seaf_block_manager_read_block (SeafBlockManager *mgr, 
BlockHandle *handle, void *buf, int len) { return mgr->backend->read_block (mgr->backend, handle, buf, len); } int seaf_block_manager_write_block (SeafBlockManager *mgr, BlockHandle *handle, const void *buf, int len) { return mgr->backend->write_block (mgr->backend, handle, buf, len); } int seaf_block_manager_close_block (SeafBlockManager *mgr, BlockHandle *handle) { return mgr->backend->close_block (mgr->backend, handle); } void seaf_block_manager_block_handle_free (SeafBlockManager *mgr, BlockHandle *handle) { return mgr->backend->block_handle_free (mgr->backend, handle); } int seaf_block_manager_commit_block (SeafBlockManager *mgr, BlockHandle *handle) { return mgr->backend->commit_block (mgr->backend, handle); } gboolean seaf_block_manager_block_exists (SeafBlockManager *mgr, const char *store_id, int version, const char *block_id) { if (!store_id || !is_uuid_valid(store_id) || !block_id || !is_object_id_valid(block_id)) return FALSE; return mgr->backend->exists (mgr->backend, store_id, version, block_id); } int seaf_block_manager_remove_block (SeafBlockManager *mgr, const char *store_id, int version, const char *block_id) { if (!store_id || !is_uuid_valid(store_id) || !block_id || !is_object_id_valid(block_id)) return -1; return mgr->backend->remove_block (mgr->backend, store_id, version, block_id); } BlockMetadata * seaf_block_manager_stat_block (SeafBlockManager *mgr, const char *store_id, int version, const char *block_id) { if (!store_id || !is_uuid_valid(store_id) || !block_id || !is_object_id_valid(block_id)) return NULL; return mgr->backend->stat_block (mgr->backend, store_id, version, block_id); } BlockMetadata * seaf_block_manager_stat_block_by_handle (SeafBlockManager *mgr, BlockHandle *handle) { return mgr->backend->stat_block_by_handle (mgr->backend, handle); } int seaf_block_manager_foreach_block (SeafBlockManager *mgr, const char *store_id, int version, SeafBlockFunc process, void *user_data) { return mgr->backend->foreach_block (mgr->backend, 
store_id, version, process, user_data); } int seaf_block_manager_copy_block (SeafBlockManager *mgr, const char *src_store_id, int src_version, const char *dst_store_id, int dst_version, const char *block_id) { if (strcmp (block_id, EMPTY_SHA1) == 0) return 0; if (seaf_block_manager_block_exists (mgr, dst_store_id, dst_version, block_id)) { return 0; } return mgr->backend->copy (mgr->backend, src_store_id, src_version, dst_store_id, dst_version, block_id); } static gboolean get_block_number (const char *store_id, int version, const char *block_id, void *data) { guint64 *n_blocks = data; ++(*n_blocks); return TRUE; } guint64 seaf_block_manager_get_block_number (SeafBlockManager *mgr, const char *store_id, int version) { guint64 n_blocks = 0; seaf_block_manager_foreach_block (mgr, store_id, version, get_block_number, &n_blocks); return n_blocks; } gboolean seaf_block_manager_verify_block (SeafBlockManager *mgr, const char *store_id, int version, const char *block_id, gboolean *io_error) { BlockHandle *h; char buf[10240]; int n; SHA_CTX ctx; guint8 sha1[20]; char check_id[41]; h = seaf_block_manager_open_block (mgr, store_id, version, block_id, BLOCK_READ); if (!h) { seaf_warning ("Failed to open block %s:%.8s.\n", store_id, block_id); *io_error = TRUE; return FALSE; } SHA1_Init (&ctx); while (1) { n = seaf_block_manager_read_block (mgr, h, buf, sizeof(buf)); if (n < 0) { seaf_warning ("Failed to read block %s:%.8s.\n", store_id, block_id); *io_error = TRUE; return FALSE; } if (n == 0) break; SHA1_Update (&ctx, buf, n); } seaf_block_manager_close_block (mgr, h); seaf_block_manager_block_handle_free (mgr, h); SHA1_Final (sha1, &ctx); rawdata_to_hex (sha1, check_id, 20); if (strcmp (check_id, block_id) == 0) return TRUE; else return FALSE; } int seaf_block_manager_remove_store (SeafBlockManager *mgr, const char *store_id) { return mgr->backend->remove_store (mgr->backend, store_id); } ================================================ FILE: common/block-mgr.h 
================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef SEAF_BLOCK_MGR_H #define SEAF_BLOCK_MGR_H #include #include #include #include "block.h" struct _SeafileSession; typedef struct _SeafBlockManager SeafBlockManager; struct _SeafBlockManager { struct _SeafileSession *seaf; struct BlockBackend *backend; }; SeafBlockManager * seaf_block_manager_new (struct _SeafileSession *seaf, const char *seaf_dir); /* * Open a block for read or write. * * @store_id: id for the block store * @version: data format version for the repo * @block_id: ID of block. * @rw_type: BLOCK_READ or BLOCK_WRITE. * Returns: A handle for the block. */ BlockHandle * seaf_block_manager_open_block (SeafBlockManager *mgr, const char *store_id, int version, const char *block_id, int rw_type); /* * Read data from a block. * The semantics is similar to readn. * * @handle: Hanlde returned by seaf_block_manager_open_block(). * @buf: Data wuold be copied into this buf. * @len: At most @len bytes would be read. * * Returns: the bytes read. */ int seaf_block_manager_read_block (SeafBlockManager *mgr, BlockHandle *handle, void *buf, int len); /* * Write data to a block. * The semantics is similar to writen. * * @handle: Hanlde returned by seaf_block_manager_open_block(). * @buf: Data to be written to the block. * @len: At most @len bytes would be written. * * Returns: the bytes written. */ int seaf_block_manager_write_block (SeafBlockManager *mgr, BlockHandle *handle, const void *buf, int len); /* * Commit a block to storage. * The block must be opened for write. * * @handle: Hanlde returned by seaf_block_manager_open_block(). * * Returns: 0 on success, -1 on error. */ int seaf_block_manager_commit_block (SeafBlockManager *mgr, BlockHandle *handle); /* * Close an open block. * * @handle: Hanlde returned by seaf_block_manager_open_block(). * * Returns: 0 on success, -1 on error. 
*/ int seaf_block_manager_close_block (SeafBlockManager *mgr, BlockHandle *handle); void seaf_block_manager_block_handle_free (SeafBlockManager *mgr, BlockHandle *handle); gboolean seaf_block_manager_block_exists (SeafBlockManager *mgr, const char *store_id, int version, const char *block_id); int seaf_block_manager_remove_block (SeafBlockManager *mgr, const char *store_id, int version, const char *block_id); BlockMetadata * seaf_block_manager_stat_block (SeafBlockManager *mgr, const char *store_id, int version, const char *block_id); BlockMetadata * seaf_block_manager_stat_block_by_handle (SeafBlockManager *mgr, BlockHandle *handle); int seaf_block_manager_foreach_block (SeafBlockManager *mgr, const char *store_id, int version, SeafBlockFunc process, void *user_data); int seaf_block_manager_copy_block (SeafBlockManager *mgr, const char *src_store_id, int src_version, const char *dst_store_id, int dst_version, const char *block_id); /* Remove all blocks for a repo. Only valid for version 1 repo. */ int seaf_block_manager_remove_store (SeafBlockManager *mgr, const char *store_id); guint64 seaf_block_manager_get_block_number (SeafBlockManager *mgr, const char *store_id, int version); gboolean seaf_block_manager_verify_block (SeafBlockManager *mgr, const char *store_id, int version, const char *block_id, gboolean *io_error); #endif ================================================ FILE: common/block-tx-utils.c ================================================ #include "common.h" #define DEBUG_FLAG SEAFILE_DEBUG_TRANSFER #include "log.h" #include "utils.h" #include "block-tx-utils.h" /* Utility functions for block transfer protocol. */ /* Encryption related functions. 
*/ void blocktx_generate_encrypt_key (unsigned char *session_key, int sk_len, unsigned char *key, unsigned char *iv) { EVP_BytesToKey (EVP_aes_256_cbc(), /* cipher mode */ EVP_sha1(), /* message digest */ NULL, /* salt */ session_key, sk_len, 3, /* iteration times */ key, /* the derived key */ iv); /* IV, initial vector */ } int blocktx_encrypt_init (EVP_CIPHER_CTX **ctx, const unsigned char *key, const unsigned char *iv) { int ret; /* Prepare CTX for encryption. */ *ctx = EVP_CIPHER_CTX_new (); ret = EVP_EncryptInit_ex (*ctx, EVP_aes_256_cbc(), /* cipher mode */ NULL, /* engine, NULL for default */ key, /* derived key */ iv); /* initial vector */ if (ret == 0) return -1; return 0; } int blocktx_decrypt_init (EVP_CIPHER_CTX **ctx, const unsigned char *key, const unsigned char *iv) { int ret; /* Prepare CTX for decryption. */ *ctx = EVP_CIPHER_CTX_new(); ret = EVP_DecryptInit_ex (*ctx, EVP_aes_256_cbc(), /* cipher mode */ NULL, /* engine, NULL for default */ key, /* derived key */ iv); /* initial vector */ if (ret == 0) return -1; return 0; } /* Sending frame */ int send_encrypted_data_frame_begin (evutil_socket_t data_fd, int frame_len) { /* Compute data size after encryption. * Block size is 16 bytes and AES always add one padding block. 
*/ int enc_frame_len; enc_frame_len = ((frame_len >> 4) + 1) << 4; enc_frame_len = htonl (enc_frame_len); if (sendn (data_fd, &enc_frame_len, sizeof(int)) < 0) { seaf_warning ("Failed to send frame length: %s.\n", evutil_socket_error_to_string(evutil_socket_geterror(data_fd))); return -1; } return 0; } int send_encrypted_data (EVP_CIPHER_CTX *ctx, evutil_socket_t data_fd, const void *buf, int len) { char out_buf[len + ENC_BLOCK_SIZE]; int out_len; if (EVP_EncryptUpdate (ctx, (unsigned char *)out_buf, &out_len, (unsigned char *)buf, len) == 0) { seaf_warning ("Failed to encrypt data.\n"); return -1; } if (sendn (data_fd, out_buf, out_len) < 0) { seaf_warning ("Failed to write data: %s.\n", evutil_socket_error_to_string(evutil_socket_geterror(data_fd))); return -1; } return 0; } int send_encrypted_data_frame_end (EVP_CIPHER_CTX *ctx, evutil_socket_t data_fd) { char out_buf[ENC_BLOCK_SIZE]; int out_len; if (EVP_EncryptFinal_ex (ctx, (unsigned char *)out_buf, &out_len) == 0) { seaf_warning ("Failed to encrypt data.\n"); return -1; } if (sendn (data_fd, out_buf, out_len) < 0) { seaf_warning ("Failed to write data: %s.\n", evutil_socket_error_to_string(evutil_socket_geterror(data_fd))); return -1; } return 0; } /* Receiving frame */ static int handle_frame_content (struct evbuffer *buf, FrameParser *parser) { char *frame; EVP_CIPHER_CTX *ctx; char *out; int outlen, outlen2; int ret = 0; struct evbuffer *input = buf; if (evbuffer_get_length (input) < parser->enc_frame_len) return 0; if (parser->version == 1) blocktx_decrypt_init (&ctx, parser->key, parser->iv); else if (parser->version == 2) blocktx_decrypt_init (&ctx, parser->key_v2, parser->iv_v2); frame = g_malloc (parser->enc_frame_len); out = g_malloc (parser->enc_frame_len + ENC_BLOCK_SIZE); evbuffer_remove (input, frame, parser->enc_frame_len); if (EVP_DecryptUpdate (ctx, (unsigned char *)out, &outlen, (unsigned char *)frame, parser->enc_frame_len) == 0) { seaf_warning ("Failed to decrypt frame content.\n"); ret = 
-1; goto out; } if (EVP_DecryptFinal_ex (ctx, (unsigned char *)(out + outlen), &outlen2) == 0) { seaf_warning ("Failed to decrypt frame content.\n"); ret = -1; goto out; } ret = parser->content_cb (out, outlen + outlen2, parser->cbarg); out: g_free (frame); g_free (out); parser->enc_frame_len = 0; EVP_CIPHER_CTX_free (ctx); return ret; } int handle_one_frame (struct evbuffer *buf, FrameParser *parser) { struct evbuffer *input = buf; if (!parser->enc_frame_len) { /* Read the length of the encrypted frame first. */ if (evbuffer_get_length (input) < sizeof(int)) return 0; int frame_len; evbuffer_remove (input, &frame_len, sizeof(int)); parser->enc_frame_len = ntohl (frame_len); if (evbuffer_get_length (input) > 0) return handle_frame_content (buf, parser); return 0; } else { return handle_frame_content (buf, parser); } } static int handle_frame_fragment_content (struct evbuffer *buf, FrameParser *parser) { char *fragment = NULL, *out = NULL; int fragment_len, outlen; int ret = 0; struct evbuffer *input = buf; fragment_len = evbuffer_get_length (input); fragment = g_malloc (fragment_len); evbuffer_remove (input, fragment, fragment_len); out = g_malloc (fragment_len + ENC_BLOCK_SIZE); if (EVP_DecryptUpdate (parser->ctx, (unsigned char *)out, &outlen, (unsigned char *)fragment, fragment_len) == 0) { seaf_warning ("Failed to decrypt frame fragment.\n"); ret = -1; goto out; } ret = parser->fragment_cb (out, outlen, 0, parser->cbarg); if (ret < 0) goto out; parser->remain -= fragment_len; if (parser->remain <= 0) { if (EVP_DecryptFinal_ex (parser->ctx, (unsigned char *)out, &outlen) == 0) { seaf_warning ("Failed to decrypt frame fragment.\n"); ret = -1; goto out; } ret = parser->fragment_cb (out, outlen, 1, parser->cbarg); if (ret < 0) goto out; EVP_CIPHER_CTX_free (parser->ctx); parser->enc_init = FALSE; parser->enc_frame_len = 0; } out: g_free (fragment); g_free (out); if (ret < 0) { EVP_CIPHER_CTX_free (parser->ctx); parser->enc_init = FALSE; parser->enc_frame_len = 0; } 
return ret; } int handle_frame_fragments (struct evbuffer *buf, FrameParser *parser) { struct evbuffer *input = buf; if (!parser->enc_frame_len) { /* Read the length of the encrypted frame first. */ if (evbuffer_get_length (input) < sizeof(int)) return 0; int frame_len; evbuffer_remove (input, &frame_len, sizeof(int)); parser->enc_frame_len = ntohl (frame_len); parser->remain = parser->enc_frame_len; if (parser->version == 1) blocktx_decrypt_init (&parser->ctx, parser->key, parser->iv); else if (parser->version == 2) blocktx_decrypt_init (&parser->ctx, parser->key_v2, parser->iv_v2); parser->enc_init = TRUE; if (evbuffer_get_length (input) > 0) return handle_frame_fragment_content (buf, parser); return 0; } else { return handle_frame_fragment_content (buf, parser); } } ================================================ FILE: common/block-tx-utils.h ================================================ #ifndef BLOCK_TX_UTILS_H #define BLOCK_TX_UTILS_H #include #include #include /* Common structures and contants shared by the client and server. 
*/ /* We use AES 256 */ #define ENC_KEY_SIZE 32 #define ENC_BLOCK_SIZE 16 #define BLOCK_PROTOCOL_VERSION 2 enum { STATUS_OK = 0, STATUS_VERSION_MISMATCH, STATUS_BAD_REQUEST, STATUS_ACCESS_DENIED, STATUS_INTERNAL_SERVER_ERROR, STATUS_NOT_FOUND, }; struct _HandshakeRequest { gint32 version; gint32 key_len; char enc_session_key[0]; } __attribute__((__packed__)); typedef struct _HandshakeRequest HandshakeRequest; struct _HandshakeResponse { gint32 status; gint32 version; } __attribute__((__packed__)); typedef struct _HandshakeResponse HandshakeResponse; struct _AuthResponse { gint32 status; } __attribute__((__packed__)); typedef struct _AuthResponse AuthResponse; enum { REQUEST_COMMAND_GET = 0, REQUEST_COMMAND_PUT, }; struct _RequestHeader { gint32 command; char block_id[40]; } __attribute__((__packed__)); typedef struct _RequestHeader RequestHeader; struct _ResponseHeader { gint32 status; } __attribute__((__packed__)); typedef struct _ResponseHeader ResponseHeader; /* Utility functions for encryption. */ void blocktx_generate_encrypt_key (unsigned char *session_key, int sk_len, unsigned char *key, unsigned char *iv); int blocktx_encrypt_init (EVP_CIPHER_CTX **ctx, const unsigned char *key, const unsigned char *iv); int blocktx_decrypt_init (EVP_CIPHER_CTX **ctx, const unsigned char *key, const unsigned char *iv); /* * Encrypted data is sent in "frames". * Format of a frame: * * length of data in the frame after encryption + encrypted data. * * Each frame can contain three types of contents: * 1. Auth request or response; * 2. Block request or response header; * 3. Block content. 
*/ int send_encrypted_data_frame_begin (evutil_socket_t data_fd, int frame_len); int send_encrypted_data (EVP_CIPHER_CTX *ctx, evutil_socket_t data_fd, const void *buf, int len); int send_encrypted_data_frame_end (EVP_CIPHER_CTX *ctx, evutil_socket_t data_fd); typedef int (*FrameContentCB) (char *, int, void *); typedef int (*FrameFragmentCB) (char *, int, int, void *); typedef struct _FrameParser { int enc_frame_len; unsigned char key[ENC_KEY_SIZE]; unsigned char iv[ENC_BLOCK_SIZE]; gboolean enc_init; EVP_CIPHER_CTX *ctx; unsigned char key_v2[ENC_KEY_SIZE]; unsigned char iv_v2[ENC_BLOCK_SIZE]; int version; /* Used when parsing fragments */ int remain; FrameContentCB content_cb; FrameFragmentCB fragment_cb; void *cbarg; } FrameParser; /* Handle entire frame all at once. * parser->content_cb() will be called after the entire frame is read. */ int handle_one_frame (struct evbuffer *buf, FrameParser *parser); /* Handle a frame fragment by fragment. * parser->fragment_cb() will be called when any amount data is read. */ int handle_frame_fragments (struct evbuffer *buf, FrameParser *parser); #endif ================================================ FILE: common/block.h ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef BLOCK_H #define BLOCK_H typedef struct _BMetadata BlockMetadata; typedef struct _BMetadata BMetadata; struct _BMetadata { char id[41]; uint32_t size; }; /* Opaque block handle. 
*/ typedef struct _BHandle BlockHandle; typedef struct _BHandle BHandle; enum { BLOCK_READ, BLOCK_WRITE, }; typedef gboolean (*SeafBlockFunc) (const char *store_id, int version, const char *block_id, void *user_data); #endif ================================================ FILE: common/branch-mgr.c ================================================ #include "common.h" #include "log.h" #ifndef SEAFILE_SERVER #include "db.h" #else #include "seaf-db.h" #endif #include "seafile-session.h" #ifdef FULL_FEATURE #include "notif-mgr.h" #endif #include "branch-mgr.h" #define BRANCH_DB "branch.db" SeafBranch * seaf_branch_new (const char *name, const char *repo_id, const char *commit_id) { SeafBranch *branch; branch = g_new0 (SeafBranch, 1); branch->name = g_strdup (name); memcpy (branch->repo_id, repo_id, 36); branch->repo_id[36] = '\0'; memcpy (branch->commit_id, commit_id, 40); branch->commit_id[40] = '\0'; branch->ref = 1; return branch; } void seaf_branch_free (SeafBranch *branch) { if (branch == NULL) return; g_free (branch->name); g_free (branch); } void seaf_branch_list_free (GList *blist) { GList *ptr; for (ptr = blist; ptr; ptr = ptr->next) { seaf_branch_unref (ptr->data); } g_list_free (blist); } void seaf_branch_set_commit (SeafBranch *branch, const char *commit_id) { memcpy (branch->commit_id, commit_id, 40); branch->commit_id[40] = '\0'; } void seaf_branch_ref (SeafBranch *branch) { branch->ref++; } void seaf_branch_unref (SeafBranch *branch) { if (!branch) return; if (--branch->ref <= 0) seaf_branch_free (branch); } struct _SeafBranchManagerPriv { sqlite3 *db; #ifndef SEAFILE_SERVER pthread_mutex_t db_lock; #endif }; static int open_db (SeafBranchManager *mgr); SeafBranchManager * seaf_branch_manager_new (struct _SeafileSession *seaf) { SeafBranchManager *mgr; mgr = g_new0 (SeafBranchManager, 1); mgr->priv = g_new0 (SeafBranchManagerPriv, 1); mgr->seaf = seaf; #ifndef SEAFILE_SERVER pthread_mutex_init (&mgr->priv->db_lock, NULL); #endif return mgr; } int 
/* Create (if needed) and open the branch table backing store.
 * Client builds: a private sqlite file (BRANCH_DB) under seaf_dir.
 * Server builds (FULL_FEATURE): the Branch table in the session DB,
 * with per-backend DDL.  Returns 0 on success, -1 on failure. */
static int
open_db (SeafBranchManager *mgr)
{
    /* Skip table creation when the admin disabled it — except on PostgreSQL,
     * where this code path is still taken. */
    if (!mgr->seaf->create_tables && seaf_db_type (mgr->seaf->db) != SEAF_DB_TYPE_PGSQL)
        return 0;

#ifndef SEAFILE_SERVER
    char *db_path;
    const char *sql;

    db_path = g_build_filename (mgr->seaf->seaf_dir, BRANCH_DB, NULL);
    if (sqlite_open_db (db_path, &mgr->priv->db) < 0) {
        g_critical ("[Branch mgr] Failed to open branch db\n");
        g_free (db_path);
        return -1;
    }
    g_free (db_path);

    sql = "CREATE TABLE IF NOT EXISTS Branch ("
        "name TEXT, repo_id TEXT, commit_id TEXT);";
    if (sqlite_query_exec (mgr->priv->db, sql) < 0)
        return -1;

    sql = "CREATE INDEX IF NOT EXISTS branch_index ON Branch(repo_id, name);";
    if (sqlite_query_exec (mgr->priv->db, sql) < 0)
        return -1;
#elif defined FULL_FEATURE
    char *sql;

    switch (seaf_db_type (mgr->seaf->db)) {
    case SEAF_DB_TYPE_MYSQL:
        /* MySQL: surrogate key plus a unique index on (repo_id, name). */
        sql = "CREATE TABLE IF NOT EXISTS Branch ("
            "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
            "name VARCHAR(10), repo_id CHAR(41), commit_id CHAR(41),"
            "UNIQUE INDEX(repo_id, name)) ENGINE = INNODB";
        if (seaf_db_query (mgr->seaf->db, sql) < 0)
            return -1;
        break;
    case SEAF_DB_TYPE_PGSQL:
        /* PostgreSQL: composite primary key, 40-char id columns. */
        sql = "CREATE TABLE IF NOT EXISTS Branch ("
            "name VARCHAR(10), repo_id CHAR(40), commit_id CHAR(40),"
            "PRIMARY KEY (repo_id, name))";
        if (seaf_db_query (mgr->seaf->db, sql) < 0)
            return -1;
        break;
    case SEAF_DB_TYPE_SQLITE:
        sql = "CREATE TABLE IF NOT EXISTS Branch ("
            "name VARCHAR(10), repo_id CHAR(41), commit_id CHAR(41),"
            "PRIMARY KEY (repo_id, name))";
        if (seaf_db_query (mgr->seaf->db, sql) < 0)
            return -1;
        break;
    }
#endif

    return 0;
}
commit_id=%Q WHERE " "name=%Q and repo_id=%Q", branch->commit_id, branch->name, branch->repo_id); else sqlite3_snprintf (sizeof(sql), sql, "INSERT INTO Branch (name, repo_id, commit_id) VALUES (%Q, %Q, %Q)", branch->name, branch->repo_id, branch->commit_id); sqlite_query_exec (mgr->priv->db, sql); pthread_mutex_unlock (&mgr->priv->db_lock); return 0; #else char *sql; SeafDB *db = mgr->seaf->db; if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) { gboolean exists, err; int rc; sql = "SELECT repo_id FROM Branch WHERE name=? AND repo_id=?"; exists = seaf_db_statement_exists(db, sql, &err, 2, "string", branch->name, "string", branch->repo_id); if (err) return -1; if (exists) rc = seaf_db_statement_query (db, "UPDATE Branch SET commit_id=? " "WHERE name=? AND repo_id=?", 3, "string", branch->commit_id, "string", branch->name, "string", branch->repo_id); else rc = seaf_db_statement_query (db, "INSERT INTO Branch (name, repo_id, commit_id) VALUES (?, ?, ?)", 3, "string", branch->name, "string", branch->repo_id, "string", branch->commit_id); if (rc < 0) return -1; } else { int rc = seaf_db_statement_query (db, "REPLACE INTO Branch (name, repo_id, commit_id) VALUES (?, ?, ?)", 3, "string", branch->name, "string", branch->repo_id, "string", branch->commit_id); if (rc < 0) return -1; } return 0; #endif } int seaf_branch_manager_del_branch (SeafBranchManager *mgr, const char *repo_id, const char *name) { #ifndef SEAFILE_SERVER char *sql; pthread_mutex_lock (&mgr->priv->db_lock); sql = sqlite3_mprintf ("DELETE FROM Branch WHERE name = %Q AND " "repo_id = '%s'", name, repo_id); if (sqlite_query_exec (mgr->priv->db, sql) < 0) seaf_warning ("Delete branch %s failed\n", name); sqlite3_free (sql); pthread_mutex_unlock (&mgr->priv->db_lock); return 0; #else int rc = seaf_db_statement_query (mgr->seaf->db, "DELETE FROM Branch WHERE name=? 
AND repo_id=?", 2, "string", name, "string", repo_id); if (rc < 0) return -1; return 0; #endif } int seaf_branch_manager_update_branch (SeafBranchManager *mgr, SeafBranch *branch) { #ifndef SEAFILE_SERVER sqlite3 *db; char *sql; pthread_mutex_lock (&mgr->priv->db_lock); db = mgr->priv->db; sql = sqlite3_mprintf ("UPDATE Branch SET commit_id = %Q " "WHERE name = %Q AND repo_id = %Q", branch->commit_id, branch->name, branch->repo_id); sqlite_query_exec (db, sql); sqlite3_free (sql); pthread_mutex_unlock (&mgr->priv->db_lock); return 0; #else int rc = seaf_db_statement_query (mgr->seaf->db, "UPDATE Branch SET commit_id = ? " "WHERE name = ? AND repo_id = ?", 3, "string", branch->commit_id, "string", branch->name, "string", branch->repo_id); if (rc < 0) return -1; return 0; #endif } #if defined( SEAFILE_SERVER ) && defined( FULL_FEATURE ) #include "mq-mgr.h" static gboolean get_commit_id (SeafDBRow *row, void *data) { char *out_commit_id = data; const char *commit_id; commit_id = seaf_db_row_get_column_text (row, 0); memcpy (out_commit_id, commit_id, 41); out_commit_id[40] = '\0'; return FALSE; } static void publish_repo_update_event (const char *repo_id, const char *commit_id) { json_t *msg = json_object (); char *msg_str = NULL; json_object_set_new (msg, "msg_type", json_string("repo-update")); json_object_set_new (msg, "repo_id", json_string(repo_id)); json_object_set_new (msg, "commit_id", json_string(commit_id)); msg_str = json_dumps (msg, JSON_PRESERVE_ORDER); seaf_mq_manager_publish_event (seaf->mq_mgr, SEAFILE_SERVER_CHANNEL_EVENT, msg_str); g_free (msg_str); json_decref (msg); } static void notify_repo_update (const char *repo_id, const char *commit_id) { json_t *event = NULL; json_t *content = NULL; char *msg = NULL; event = json_object (); content = json_object (); json_object_set_new (event, "type", json_string("repo-update")); json_object_set_new (content, "repo_id", json_string(repo_id)); json_object_set_new (content, "commit_id", json_string(commit_id)); 
/* Atomically compare-and-swap the branch head inside one DB transaction:
 * update @branch in the DB only if its current commit id still equals
 * @old_commit_id.  When @check_gc is set, also verify that no GC run has
 * happened since @last_gc_id (using the GCID table of this repo, or of
 * @origin_repo_id for virtual repos); on mismatch *gc_conflict is set
 * and -1 returned.  On success on_branch_updated() is fired.
 * Returns 0 on success, -1 on any conflict or DB error. */
int
seaf_branch_manager_test_and_update_branch (SeafBranchManager *mgr,
                                            SeafBranch *branch,
                                            const char *old_commit_id,
                                            gboolean check_gc,
                                            const char *last_gc_id,
                                            const char *origin_repo_id,
                                            gboolean *gc_conflict)
{
    SeafDBTrans *trans;
    char *sql;
    char commit_id[41] = { 0 };
    char *gc_id = NULL;

    if (check_gc)
        *gc_conflict = FALSE;

    trans = seaf_db_begin_transaction (mgr->seaf->db);
    if (!trans)
        return -1;

    if (check_gc) {
        /* Lock the GCID row for the duration of the transaction.
         * NOTE(review): "FOR UPDATE" is used unconditionally here, unlike the
         * Branch SELECT below which special-cases SQLite — confirm this path
         * is only reached on MySQL/PgSQL. */
        sql = "SELECT gc_id FROM GCID WHERE repo_id = ? FOR UPDATE";
        /* For virtual repos the GC id lives under the origin repo's id. */
        if (!origin_repo_id) {
            if (seaf_db_trans_foreach_selected_row (trans, sql,
                                                    get_gc_id, &gc_id,
                                                    1, "string", branch->repo_id) < 0) {
                seaf_db_rollback (trans);
                seaf_db_trans_close (trans);
                return -1;
            }
        } else {
            if (seaf_db_trans_foreach_selected_row (trans, sql,
                                                    get_gc_id, &gc_id,
                                                    1, "string", origin_repo_id) < 0) {
                seaf_db_rollback (trans);
                seaf_db_trans_close (trans);
                return -1;
            }
        }
        /* g_strcmp0 treats NULL as the empty-ish case, so a repo that has
         * never been GC'ed (gc_id == NULL) compares equal only to a NULL
         * last_gc_id. */
        if (g_strcmp0 (last_gc_id, gc_id) != 0) {
            seaf_warning ("Head branch update for repo %s conflicts with GC.\n",
                          branch->repo_id);
            seaf_db_rollback (trans);
            seaf_db_trans_close (trans);
            *gc_conflict = TRUE;
            g_free (gc_id);
            return -1;
        }
        g_free (gc_id);
    }

    /* Read the current head, row-locked where the backend supports it. */
    switch (seaf_db_type (mgr->seaf->db)) {
    case SEAF_DB_TYPE_MYSQL:
    case SEAF_DB_TYPE_PGSQL:
        sql = "SELECT commit_id FROM Branch WHERE name=? "
            "AND repo_id=? FOR UPDATE";
        break;
    case SEAF_DB_TYPE_SQLITE:
        /* SQLite serializes writers, so no FOR UPDATE clause. */
        sql = "SELECT commit_id FROM Branch WHERE name=? "
            "AND repo_id=?";
        break;
    default:
        g_return_val_if_reached (-1);
    }
    if (seaf_db_trans_foreach_selected_row (trans, sql,
                                            get_commit_id, commit_id,
                                            2, "string", branch->name,
                                            "string", branch->repo_id) < 0) {
        seaf_db_rollback (trans);
        seaf_db_trans_close (trans);
        return -1;
    }

    /* The compare half of the CAS: someone else moved the head first. */
    if (strcmp (old_commit_id, commit_id) != 0) {
        seaf_db_rollback (trans);
        seaf_db_trans_close (trans);
        return -1;
    }

    /* The swap half: write the new head commit id. */
    sql = "UPDATE Branch SET commit_id = ? "
        "WHERE name = ? AND repo_id = ?";
    if (seaf_db_trans_query (trans, sql, 3, "string", branch->commit_id,
                             "string", branch->name,
                             "string", branch->repo_id) < 0) {
        seaf_db_rollback (trans);
        seaf_db_trans_close (trans);
        return -1;
    }

    if (seaf_db_commit (trans) < 0) {
        seaf_db_rollback (trans);
        seaf_db_trans_close (trans);
        return -1;
    }

    seaf_db_trans_close (trans);

    /* Propagate the new head to repo info, notification server and MQ. */
    on_branch_updated (mgr, branch);

    return 0;
}
str : "no error given"); } sqlite3_finalize (stmt); pthread_mutex_unlock (&mgr->priv->db_lock); return NULL; } SeafBranch * seaf_branch_manager_get_branch (SeafBranchManager *mgr, const char *repo_id, const char *name) { SeafBranch *branch; /* "fetch_head" maps to "local" or "master" on client (LAN sync) */ if (strcmp (name, "fetch_head") == 0) { branch = real_get_branch (mgr, repo_id, "local"); if (!branch) { branch = real_get_branch (mgr, repo_id, "master"); } return branch; } else { return real_get_branch (mgr, repo_id, name); } } #else static gboolean get_branch (SeafDBRow *row, void *vid) { char *ret = vid; const char *commit_id; commit_id = seaf_db_row_get_column_text (row, 0); memcpy (ret, commit_id, 41); return FALSE; } static SeafBranch * real_get_branch (SeafBranchManager *mgr, const char *repo_id, const char *name) { char commit_id[41]; char *sql; commit_id[0] = 0; sql = "SELECT commit_id FROM Branch WHERE name=? AND repo_id=?"; if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_branch, commit_id, 2, "string", name, "string", repo_id) < 0) { seaf_warning ("[branch mgr] DB error when get branch %s.\n", name); return NULL; } if (commit_id[0] == 0) return NULL; return seaf_branch_new (name, repo_id, commit_id); } SeafBranch * seaf_branch_manager_get_branch (SeafBranchManager *mgr, const char *repo_id, const char *name) { SeafBranch *branch; /* "fetch_head" maps to "master" on server. 
/* Return TRUE if branch @name exists for repo @repo_id.
 * Client build queries the private sqlite DB under the manager lock;
 * server build uses a prepared statement on the session DB.
 * NOTE(review): the server path's db_err flag is discarded, so a DB
 * error is indistinguishable from "not found" (both yield FALSE). */
gboolean
seaf_branch_manager_branch_exists (SeafBranchManager *mgr,
                                   const char *repo_id,
                                   const char *name)
{
#ifndef SEAFILE_SERVER
    char *sql;
    gboolean ret;

    pthread_mutex_lock (&mgr->priv->db_lock);

    sql = sqlite3_mprintf ("SELECT name FROM Branch WHERE name = %Q "
                           "AND repo_id='%s'", name, repo_id);
    ret = sqlite_check_for_existence (mgr->priv->db, sql);
    sqlite3_free (sql);

    pthread_mutex_unlock (&mgr->priv->db_lock);
    return ret;
#else
    gboolean db_err = FALSE;

    return seaf_db_statement_exists (mgr->seaf->db,
                                     "SELECT name FROM Branch WHERE name=? "
                                     "AND repo_id=?", &db_err,
                                     2, "string", name, "string", repo_id);
#endif
}
str : "no error given"); sqlite3_finalize (stmt); seaf_branch_list_free (ret); pthread_mutex_unlock (&mgr->priv->db_lock); return NULL; } } sqlite3_finalize (stmt); pthread_mutex_unlock (&mgr->priv->db_lock); return g_list_reverse(ret); } #else static gboolean get_branches (SeafDBRow *row, void *vplist) { GList **plist = vplist; const char *commit_id; const char *name; const char *repo_id; SeafBranch *branch; name = seaf_db_row_get_column_text (row, 0); repo_id = seaf_db_row_get_column_text (row, 1); commit_id = seaf_db_row_get_column_text (row, 2); branch = seaf_branch_new (name, repo_id, commit_id); *plist = g_list_prepend (*plist, branch); return TRUE; } GList * seaf_branch_manager_get_branch_list (SeafBranchManager *mgr, const char *repo_id) { GList *ret = NULL; char *sql; sql = "SELECT name, repo_id, commit_id FROM Branch WHERE repo_id=?"; if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_branches, &ret, 1, "string", repo_id) < 0) { seaf_warning ("[branch mgr] DB error when get branch list.\n"); return NULL; } return ret; } #endif ================================================ FILE: common/branch-mgr.h ================================================ #ifndef SEAF_BRANCH_MGR_H #define SEAF_BRANCH_MGR_H #include "commit-mgr.h" #define NO_BRANCH "-" typedef struct _SeafBranch SeafBranch; struct _SeafBranch { int ref; char *name; char repo_id[37]; char commit_id[41]; }; SeafBranch *seaf_branch_new (const char *name, const char *repo_id, const char *commit_id); void seaf_branch_free (SeafBranch *branch); void seaf_branch_set_commit (SeafBranch *branch, const char *commit_id); void seaf_branch_ref (SeafBranch *branch); void seaf_branch_unref (SeafBranch *branch); typedef struct _SeafBranchManager SeafBranchManager; typedef struct _SeafBranchManagerPriv SeafBranchManagerPriv; struct _SeafileSession; struct _SeafBranchManager { struct _SeafileSession *seaf; SeafBranchManagerPriv *priv; }; SeafBranchManager *seaf_branch_manager_new (struct _SeafileSession 
*seaf); int seaf_branch_manager_init (SeafBranchManager *mgr); int seaf_branch_manager_add_branch (SeafBranchManager *mgr, SeafBranch *branch); int seaf_branch_manager_del_branch (SeafBranchManager *mgr, const char *repo_id, const char *name); void seaf_branch_list_free (GList *blist); int seaf_branch_manager_update_branch (SeafBranchManager *mgr, SeafBranch *branch); #ifdef SEAFILE_SERVER /** * Atomically test whether the current head commit id on @branch * is the same as @old_commit_id and update branch in db. */ int seaf_branch_manager_test_and_update_branch (SeafBranchManager *mgr, SeafBranch *branch, const char *old_commit_id, gboolean check_gc, const char *last_gc_id, const char *origin_repo_id, gboolean *gc_conflict); #endif SeafBranch * seaf_branch_manager_get_branch (SeafBranchManager *mgr, const char *repo_id, const char *name); gboolean seaf_branch_manager_branch_exists (SeafBranchManager *mgr, const char *repo_id, const char *name); GList * seaf_branch_manager_get_branch_list (SeafBranchManager *mgr, const char *repo_id); gint64 seaf_branch_manager_calculate_branch_size (SeafBranchManager *mgr, const char *repo_id, const char *commit_id); #endif /* SEAF_BRANCH_MGR_H */ ================================================ FILE: common/cdc/Makefile.am ================================================ AM_CFLAGS = -I$(top_srcdir)/common -I$(top_srcdir)/lib \ -Wall @GLIB2_CFLAGS@ @MSVC_CFLAGS@ noinst_LTLIBRARIES = libcdc.la noinst_HEADERS = cdc.h rabin-checksum.h libcdc_la_SOURCES = cdc.c rabin-checksum.c libcdc_la_LDFLAGS = -Wl,-z -Wl,defs libcdc_la_LIBADD = @SSL_LIBS@ @GLIB2_LIBS@ \ $(top_builddir)/lib/libseafile_common.la ================================================ FILE: common/cdc/cdc.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include "common.h" #include "log.h" #include #include #include #include #include #include #include #include #include "utils.h" #include "cdc.h" 
/* Fallback WriteblockFunc used when the caller installs none: write the
 * chunk into the current directory, named by the hex form of its checksum.
 * Returns the value of writen() (bytes written) or -1 if the file can't
 * be opened.
 * NOTE(review): O_TRUNC is not set, so overwriting an existing longer
 * file would leave stale trailing bytes — confirm this is intended for
 * this debug/default path. */
static int
default_write_chunk (CDCDescriptor *chunk_descr)
{
    char filename[NAME_MAX_SZ];
    char chksum_str[CHECKSUM_LENGTH *2 + 1];
    int fd_chunk, ret;

    memset(chksum_str, 0, sizeof(chksum_str));
    rawdata_to_hex (chunk_descr->checksum, chksum_str, CHECKSUM_LENGTH);
    snprintf (filename, NAME_MAX_SZ, "./%s", chksum_str);
    fd_chunk = g_open (filename, O_RDWR | O_CREAT | O_BINARY, 0644);
    if (fd_chunk < 0)
        return -1;

    ret = writen (fd_chunk, chunk_descr->block_buf, chunk_descr->len);
    close (fd_chunk);
    return ret;
}
/* content-defined chunking */
/* Split the file open on @fd_src into variable-sized chunks using a
 * Rabin rolling fingerprint: a chunk boundary is declared where the
 * masked fingerprint matches BREAK_VALUE, or at block_max_sz.  Each
 * chunk is handed to file_descr->write_block (via WRITE_CDC_BLOCK),
 * its checksum recorded in blk_sha1s, and a whole-file SHA1 over the
 * chunk checksums accumulated into file_descr->file_sum.
 * @crypt:      passed through to write_block (may encrypt chunk data).
 * @write_data: passed through to write_block; FALSE means index only.
 * @indexed:    if non-NULL, incremented by the bytes chunked (progress).
 * Returns 0 on success, -1 on stat/read/write error, on a concurrent
 * file-size change, or if more chunks appear than max_block_nr allows. */
int file_chunk_cdc(int fd_src,
                   CDCFileDescriptor *file_descr,
                   SeafileCrypt *crypt,
                   gboolean write_data,
                   gint64 *indexed)
{
    char *buf;
    uint32_t buf_sz;
    SHA_CTX file_ctx;
    CDCDescriptor chunk_descr;
    SHA1_Init (&file_ctx);

    SeafStat sb;
    if (seaf_fstat (fd_src, &sb) < 0) {
        seaf_warning ("CDC: failed to stat: %s.\n", strerror(errno));
        return -1;
    }
    uint64_t expected_size = sb.st_size;
    init_cdc_file_descriptor (fd_src, expected_size, file_descr);
    uint32_t block_min_sz = file_descr->block_min_sz;
    /* block_sz is a power of two; the mask selects the fingerprint bits
     * compared against BREAK_VALUE, giving an average chunk of block_sz. */
    uint32_t block_mask = file_descr->block_sz - 1;

    int fingerprint = 0;
    int offset = 0;                 /* file offset of the current chunk */
    int ret = 0;
    int tail, cur, rsize;

    buf_sz = file_descr->block_max_sz;
    buf = chunk_descr.block_buf = malloc (buf_sz);
    if (!buf)
        return -1;

    /* buf: a fix-sized buffer.
     * cur: data behind (inclusive) this offset has been scanned.
     *      cur + 1 is the bytes that has been scanned.
     * tail: length of data loaded into memory. buf[tail] is invalid.
     */
    tail = cur = 0;
    while (1) {
        if (tail < block_min_sz) {
            rsize = block_min_sz - tail + READ_SIZE;
        } else {
            rsize = (buf_sz - tail < READ_SIZE) ? (buf_sz - tail) : READ_SIZE;
        }
        ret = readn (fd_src, buf + tail, rsize);
        if (ret < 0) {
            seaf_warning ("CDC: failed to read: %s.\n", strerror(errno));
            free (buf);
            return -1;
        }
        tail += ret;
        file_descr->file_size += ret;

        /* Bail out if the file grew while we were chunking it. */
        if (file_descr->file_size > expected_size) {
            seaf_warning ("File size changed while chunking.\n");
            free (buf);
            return -1;
        }

        /* We've read all the data in this file. Output the block immediately
         * in two cases:
         * 1. The data left in the file is less than block_min_sz;
         * 2. We cannot find the break value until the end of this file.
         */
        if (tail < block_min_sz || cur >= tail) {
            if (tail > 0) {
                if (file_descr->block_nr == file_descr->max_block_nr) {
                    seaf_warning ("Block id array is not large enough, bail out.\n");
                    free (buf);
                    return -1;
                }
                /* WRITE_CDC_BLOCK mutates tail, so snapshot the size first. */
                gint64 idx_size = tail;
                WRITE_CDC_BLOCK (tail, write_data);
                if (indexed)
                    *indexed += idx_size;
            }
            break;
        }

        /*
         * A block is at least of size block_min_sz.
         */
        if (cur < block_min_sz - 1)
            cur = block_min_sz - 1;

        while (cur < tail) {
            /* Full fingerprint at the first window; cheap rolling update
             * (drop the byte leaving the window, add the new one) after. */
            fingerprint = (cur == block_min_sz - 1) ?
                finger(buf + cur - BLOCK_WIN_SZ + 1, BLOCK_WIN_SZ) :
                rolling_finger (fingerprint, BLOCK_WIN_SZ,
                                *(buf+cur-BLOCK_WIN_SZ), *(buf + cur));

            /* get a chunk, write block info to chunk file */
            if (((fingerprint & block_mask) == ((BREAK_VALUE & block_mask)))
                || cur + 1 >= file_descr->block_max_sz)
            {
                if (file_descr->block_nr == file_descr->max_block_nr) {
                    seaf_warning ("Block id array is not large enough, bail out.\n");
                    free (buf);
                    return -1;
                }
                gint64 idx_size = cur + 1;
                WRITE_CDC_BLOCK (cur + 1, write_data);
                if (indexed)
                    *indexed += idx_size;
                break;
            } else {
                cur ++;
            }
        }
    }

    SHA1_Final (file_descr->file_sum, &file_ctx);

    free (buf);
    return 0;
}
#endif struct _CDCFileDescriptor; struct _CDCDescriptor; struct SeafileCrypt; typedef int (*WriteblockFunc)(const char *repo_id, int version, struct _CDCDescriptor *chunk_descr, struct SeafileCrypt *crypt, uint8_t *checksum, gboolean write_data); /* define chunk file header and block entry */ typedef struct _CDCFileDescriptor { uint32_t block_min_sz; uint32_t block_max_sz; uint32_t block_sz; uint64_t file_size; uint32_t block_nr; uint8_t *blk_sha1s; int max_block_nr; uint8_t file_sum[CHECKSUM_LENGTH]; WriteblockFunc write_block; char repo_id[37]; int version; } CDCFileDescriptor; typedef struct _CDCDescriptor { uint64_t offset; uint32_t len; uint8_t checksum[CHECKSUM_LENGTH]; char *block_buf; int result; } CDCDescriptor; int file_chunk_cdc(int fd_src, CDCFileDescriptor *file_descr, struct SeafileCrypt *crypt, gboolean write_data, gint64 *indexed); int filename_chunk_cdc(const char *filename, CDCFileDescriptor *file_descr, struct SeafileCrypt *crypt, gboolean write_data, gint64 *indexed); void cdc_init (); #endif ================================================ FILE: common/cdc/rabin-checksum.c ================================================ #include #include "rabin-checksum.h" #ifdef WIN32 #include #ifndef u_int typedef unsigned int u_int; #endif #ifndef u_char typedef unsigned char u_char; #endif #ifndef u_short typedef unsigned short u_short; #endif #ifndef u_long typedef unsigned long u_long; #endif #ifndef u_int16_t typedef uint16_t u_int16_t; #endif #ifndef u_int32_t typedef uint32_t u_int32_t; #endif #ifndef u_int64_t typedef uint64_t u_int64_t; #endif #endif #define INT64(n) n##LL #define MSB64 INT64(0x8000000000000000) static u_int64_t poly = 0xbfe6b8a5bf378d83LL; static u_int64_t T[256]; static u_int64_t U[256]; static int shift; /* Highest bit set in a byte */ static const char bytemsb[0x100] = { 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 
/* Compute (nh * 2^64 + nl) mod d, treating the operands as polynomials
 * over GF(2) (carry-less arithmetic: XOR in place of add/subtract).
 * Used to build the Rabin fingerprint tables. */
u_int64_t
polymod (u_int64_t nh, u_int64_t nl, u_int64_t d)
{
    int i = 0;
    /* Index of d's highest set bit; left-align d so its top term sits
     * at bit 63 for the long-division loop below. */
    int k = fls64 (d) - 1;
    d <<= 63 - k;

    /* Fold the high 64 bits into the low word, one term at a time. */
    if (nh) {
        if (nh & MSB64)
            nh ^= d;
        for (i = 62; i >= 0; i--)
            if (nh & ((u_int64_t) 1) << i) {
                /* Term i of nh spans the word boundary: XOR the part of
                 * d that lands in nh and the part that lands in nl. */
                nh ^= d >> (63 - i);
                nl ^= d << (i + 1);
            }
    }
    /* Standard GF(2) long division on the remaining low word; bits
     * below k are the remainder. */
    for (i = 63; i >= k; i--) {
        if (nl & INT64 (1) << i)
            nl ^= d >> (63 - i);
    }
    return nl;
}
/*
 * A simple 32 bit checksum that can be updated from the end.
 * Computes the fingerprint of a fresh window by feeding each byte of
 * @buf through the rolling update with a zero outgoing byte; the result
 * can then be rolled forward with rabin_rolling_checksum().
 */
unsigned int rabin_checksum(char *buf, int len)
{
    int i;
    unsigned int sum = 0;
    for (i = 0; i < len; ++i) {
        sum = rabin_rolling_checksum (sum, len, 0, buf[i]);
    }
    return sum;
}
void compute_commit_id (SeafCommit* commit)
{
    SHA_CTX ctx;
    uint8_t sha1[20];
    gint64 ctime_n;

    /* The commit id is the SHA-1 of root id, creator id, optional creator
     * name, description and creation time.  Length 41 covers the 40 hex
     * chars plus the trailing NUL of the fixed-size id fields; the +1 on
     * strlen likewise includes the NUL terminator. */
    SHA1_Init (&ctx);
    SHA1_Update (&ctx, commit->root_id, 41);
    SHA1_Update (&ctx, commit->creator_id, 41);
    if (commit->creator_name)
        SHA1_Update (&ctx, commit->creator_name, strlen(commit->creator_name)+1);
    SHA1_Update (&ctx, commit->desc, strlen(commit->desc)+1);

    /* convert to network byte order */
    ctime_n = hton64 (commit->ctime);
    SHA1_Update (&ctx, &ctime_n, sizeof(ctime_n));
    SHA1_Final (sha1, &ctx);

    rawdata_to_hex (sha1, commit->commit_id, 20);
}

/* Create a new commit object with ref count 1.
 * If @commit_id is NULL a new id is computed from the commit contents;
 * if @ctime is 0 the current time is used. */
SeafCommit*
seaf_commit_new (const char *commit_id,
                 const char *repo_id,
                 const char *root_id,
                 const char *creator_name,
                 const char *creator_id,
                 const char *desc,
                 guint64 ctime)
{
    SeafCommit *commit;

    g_return_val_if_fail (repo_id != NULL, NULL);
    g_return_val_if_fail (root_id != NULL && creator_id != NULL, NULL);

    commit = g_new0 (SeafCommit, 1);

    memcpy (commit->repo_id, repo_id, 36);
    commit->repo_id[36] = '\0';

    memcpy (commit->root_id, root_id, 40);
    commit->root_id[40] = '\0';

    commit->creator_name = g_strdup (creator_name);

    memcpy (commit->creator_id, creator_id, 40);
    commit->creator_id[40] = '\0';

    commit->desc = g_strdup (desc);

    if (ctime == 0) {
        /* TODO: use more precise timer */
        commit->ctime = (gint64)time(NULL);
    } else
        commit->ctime = ctime;

    if (commit_id == NULL)
        compute_commit_id (commit);
    else {
        memcpy (commit->commit_id, commit_id, 40);
        commit->commit_id[40] = '\0';
    }

    commit->ref = 1;
    return commit;
}

/* Serialize @commit to a JSON string; *len receives the string length.
 * The returned buffer is g_malloc-allocated and must be freed with g_free(). */
char *
seaf_commit_to_data (SeafCommit *commit, gsize *len)
{
    json_t *object;
    char *json_data;
    char *ret;

    object = commit_to_json_object (commit);

    json_data = json_dumps (object, 0);
    *len = strlen (json_data);

    json_decref (object);

    /* json_dumps() allocates with malloc; copy so callers can use g_free(). */
    ret = g_strdup (json_data);
    free (json_data);
    return ret;
}

/* Deserialize a commit from raw JSON @data.  Returns NULL on parse failure.
 * May sanitize @data in place if it contains invalid UTF-8. */
SeafCommit *
seaf_commit_from_data (const char *id, char *data, gsize len)
{
    json_t *object;
    SeafCommit *commit;
    json_error_t jerror;

    object = json_loadb (data, len, 0, &jerror);
    if (!object) {
        /* Perhaps the commit object contains invalid
UTF-8 character. */ if (data[len-1] == 0) clean_utf8_data (data, len - 1); else clean_utf8_data (data, len); object = json_loadb (data, len, 0, &jerror); if (!object) { if (jerror.text) seaf_warning ("Failed to load commit json: %s.\n", jerror.text); else seaf_warning ("Failed to load commit json.\n"); return NULL; } } commit = commit_from_json_object (id, object); json_decref (object); return commit; } static void seaf_commit_free (SeafCommit *commit) { g_free (commit->desc); g_free (commit->creator_name); if (commit->parent_id) g_free (commit->parent_id); if (commit->second_parent_id) g_free (commit->second_parent_id); if (commit->repo_name) g_free (commit->repo_name); if (commit->repo_desc) g_free (commit->repo_desc); if (commit->device_name) g_free (commit->device_name); if (commit->repo_category) g_free (commit->repo_category); if (commit->salt) g_free (commit->salt); g_free (commit->client_version); g_free (commit->magic); g_free (commit->random_key); g_free (commit->pwd_hash); g_free (commit->pwd_hash_algo); g_free (commit->pwd_hash_params); g_free (commit); } void seaf_commit_ref (SeafCommit *commit) { commit->ref++; } void seaf_commit_unref (SeafCommit *commit) { if (!commit) return; if (--commit->ref <= 0) seaf_commit_free (commit); } SeafCommitManager* seaf_commit_manager_new (SeafileSession *seaf) { SeafCommitManager *mgr = g_new0 (SeafCommitManager, 1); mgr->priv = g_new0 (SeafCommitManagerPriv, 1); mgr->seaf = seaf; mgr->obj_store = seaf_obj_store_new (mgr->seaf, "commits"); return mgr; } int seaf_commit_manager_init (SeafCommitManager *mgr) { if (seaf_obj_store_init (mgr->obj_store) < 0) { seaf_warning ("[commit mgr] Failed to init commit object store.\n"); return -1; } return 0; } #if 0 inline static void add_commit_to_cache (SeafCommitManager *mgr, SeafCommit *commit) { g_hash_table_insert (mgr->priv->commit_cache, g_strdup(commit->commit_id), commit); seaf_commit_ref (commit); } inline static void remove_commit_from_cache (SeafCommitManager *mgr, 
SeafCommit *commit) { g_hash_table_remove (mgr->priv->commit_cache, commit->commit_id); seaf_commit_unref (commit); } #endif int seaf_commit_manager_add_commit (SeafCommitManager *mgr, SeafCommit *commit) { int ret; /* add_commit_to_cache (mgr, commit); */ if ((ret = save_commit (mgr, commit->repo_id, commit->version, commit)) < 0) return -1; return 0; } void seaf_commit_manager_del_commit (SeafCommitManager *mgr, const char *repo_id, int version, const char *id) { g_return_if_fail (id != NULL); #if 0 commit = g_hash_table_lookup(mgr->priv->commit_cache, id); if (!commit) goto delete; /* * Catch ref count bug here. We have bug in commit ref, the * following assert can't pass. TODO: fix the commit ref bug */ /* g_assert (commit->ref <= 1); */ remove_commit_from_cache (mgr, commit); delete: #endif delete_commit (mgr, repo_id, version, id); } SeafCommit* seaf_commit_manager_get_commit (SeafCommitManager *mgr, const char *repo_id, int version, const char *id) { SeafCommit *commit; #if 0 commit = g_hash_table_lookup (mgr->priv->commit_cache, id); if (commit != NULL) { seaf_commit_ref (commit); return commit; } #endif commit = load_commit (mgr, repo_id, version, id); if (!commit) return NULL; /* add_commit_to_cache (mgr, commit); */ return commit; } SeafCommit * seaf_commit_manager_get_commit_compatible (SeafCommitManager *mgr, const char *repo_id, const char *id) { SeafCommit *commit = NULL; /* First try version 1 layout. */ commit = seaf_commit_manager_get_commit (mgr, repo_id, 1, id); if (commit) return commit; #if defined MIGRATION || defined SEAFILE_CLIENT /* For compatibility with version 0. */ commit = seaf_commit_manager_get_commit (mgr, repo_id, 0, id); #endif return commit; } static gint compare_commit_by_time (gconstpointer a, gconstpointer b, gpointer unused) { const SeafCommit *commit_a = a; const SeafCommit *commit_b = b; /* Latest commit comes first in the list. 
*/ return (commit_b->ctime - commit_a->ctime); } inline static int insert_parent_commit (GList **list, GHashTable *hash, const char *repo_id, int version, const char *parent_id, gboolean allow_truncate) { SeafCommit *p; char *key; if (g_hash_table_lookup (hash, parent_id) != NULL) return 0; p = seaf_commit_manager_get_commit (seaf->commit_mgr, repo_id, version, parent_id); if (!p) { if (allow_truncate) return 0; seaf_warning ("Failed to find commit %s\n", parent_id); return -1; } *list = g_list_insert_sorted_with_data (*list, p, compare_commit_by_time, NULL); key = g_strdup (parent_id); g_hash_table_replace (hash, key, key); return 0; } gboolean seaf_commit_manager_traverse_commit_tree_with_limit (SeafCommitManager *mgr, const char *repo_id, int version, const char *head, CommitTraverseFunc func, int limit, void *data, char **next_start_commit, gboolean skip_errors) { SeafCommit *commit; GList *list = NULL; GHashTable *commit_hash; gboolean ret = TRUE; /* A hash table for recording id of traversed commits. 
*/ commit_hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); commit = seaf_commit_manager_get_commit (mgr, repo_id, version, head); if (!commit) { seaf_warning ("Failed to find commit %s.\n", head); g_hash_table_destroy (commit_hash); return FALSE; } list = g_list_insert_sorted_with_data (list, commit, compare_commit_by_time, NULL); char *key = g_strdup (commit->commit_id); g_hash_table_replace (commit_hash, key, key); int count = 0; while (list) { gboolean stop = FALSE; commit = list->data; list = g_list_delete_link (list, list); if (!func (commit, data, &stop)) { if (!skip_errors) { seaf_commit_unref (commit); ret = FALSE; goto out; } } if (stop) { seaf_commit_unref (commit); /* stop traverse down from this commit, * but not stop traversing the tree */ continue; } if (commit->parent_id) { if (insert_parent_commit (&list, commit_hash, repo_id, version, commit->parent_id, FALSE) < 0) { if (!skip_errors) { seaf_commit_unref (commit); ret = FALSE; goto out; } } } if (commit->second_parent_id) { if (insert_parent_commit (&list, commit_hash, repo_id, version, commit->second_parent_id, FALSE) < 0) { if (!skip_errors) { seaf_commit_unref (commit); ret = FALSE; goto out; } } } seaf_commit_unref (commit); /* Stop when limit is reached and don't stop at unmerged branch. * If limit < 0, there is no limit; */ if (limit > 0 && ++count >= limit && (!list || !list->next)) { break; } } /* * two scenarios: * 1. list is empty, indicate scan end * 2. 
list only have one commit, as start for next scan */ if (list) { commit = list->data; if (next_start_commit) { *next_start_commit= g_strdup (commit->commit_id); } seaf_commit_unref (commit); list = g_list_delete_link (list, list); } out: g_hash_table_destroy (commit_hash); while (list) { commit = list->data; seaf_commit_unref (commit); list = g_list_delete_link (list, list); } return ret; } static gboolean traverse_commit_tree_common (SeafCommitManager *mgr, const char *repo_id, int version, const char *head, CommitTraverseFunc func, void *data, gboolean skip_errors, gboolean allow_truncate) { SeafCommit *commit; GList *list = NULL; GHashTable *commit_hash; gboolean ret = TRUE; commit = seaf_commit_manager_get_commit (mgr, repo_id, version, head); if (!commit) { seaf_warning ("Failed to find commit %s.\n", head); // For head commit damaged, directly return FALSE // user can repair head by fsck then retraverse the tree return FALSE; } /* A hash table for recording id of traversed commits. */ commit_hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); list = g_list_insert_sorted_with_data (list, commit, compare_commit_by_time, NULL); char *key = g_strdup (commit->commit_id); g_hash_table_replace (commit_hash, key, key); while (list) { gboolean stop = FALSE; commit = list->data; list = g_list_delete_link (list, list); if (!func (commit, data, &stop)) { seaf_warning("[comit-mgr] CommitTraverseFunc failed\n"); /* If skip errors, continue to traverse parents. */ if (!skip_errors) { seaf_commit_unref (commit); ret = FALSE; goto out; } } if (stop) { seaf_commit_unref (commit); /* stop traverse down from this commit, * but not stop traversing the tree */ continue; } if (commit->parent_id) { if (insert_parent_commit (&list, commit_hash, repo_id, version, commit->parent_id, allow_truncate) < 0) { seaf_warning("[comit-mgr] insert parent commit failed\n"); /* If skip errors, try insert second parent. 
*/ if (!skip_errors) { seaf_commit_unref (commit); ret = FALSE; goto out; } } } if (commit->second_parent_id) { if (insert_parent_commit (&list, commit_hash, repo_id, version, commit->second_parent_id, allow_truncate) < 0) { seaf_warning("[comit-mgr]insert second parent commit failed\n"); if (!skip_errors) { seaf_commit_unref (commit); ret = FALSE; goto out; } } } seaf_commit_unref (commit); } out: g_hash_table_destroy (commit_hash); while (list) { commit = list->data; seaf_commit_unref (commit); list = g_list_delete_link (list, list); } return ret; } gboolean seaf_commit_manager_traverse_commit_tree (SeafCommitManager *mgr, const char *repo_id, int version, const char *head, CommitTraverseFunc func, void *data, gboolean skip_errors) { return traverse_commit_tree_common (mgr, repo_id, version, head, func, data, skip_errors, FALSE); } gboolean seaf_commit_manager_traverse_commit_tree_truncated (SeafCommitManager *mgr, const char *repo_id, int version, const char *head, CommitTraverseFunc func, void *data, gboolean skip_errors) { return traverse_commit_tree_common (mgr, repo_id, version, head, func, data, skip_errors, TRUE); } gboolean seaf_commit_manager_commit_exists (SeafCommitManager *mgr, const char *repo_id, int version, const char *id) { #if 0 commit = g_hash_table_lookup (mgr->priv->commit_cache, id); if (commit != NULL) return TRUE; #endif return seaf_obj_store_obj_exists (mgr->obj_store, repo_id, version, id); } static json_t * commit_to_json_object (SeafCommit *commit) { json_t *object; object = json_object (); json_object_set_string_member (object, "commit_id", commit->commit_id); json_object_set_string_member (object, "root_id", commit->root_id); json_object_set_string_member (object, "repo_id", commit->repo_id); if (commit->creator_name) json_object_set_string_member (object, "creator_name", commit->creator_name); json_object_set_string_member (object, "creator", commit->creator_id); json_object_set_string_member (object, "description", commit->desc); 
json_object_set_int_member (object, "ctime", (gint64)commit->ctime); json_object_set_string_or_null_member (object, "parent_id", commit->parent_id); json_object_set_string_or_null_member (object, "second_parent_id", commit->second_parent_id); /* * also save repo's properties to commit file, for easy sharing of * repo info */ json_object_set_string_member (object, "repo_name", commit->repo_name); json_object_set_string_member (object, "repo_desc", commit->repo_desc); json_object_set_string_or_null_member (object, "repo_category", commit->repo_category); if (commit->device_name) json_object_set_string_member (object, "device_name", commit->device_name); if (commit->client_version) json_object_set_string_member (object, "client_version", commit->client_version); if (commit->encrypted) json_object_set_string_member (object, "encrypted", "true"); if (commit->encrypted) { json_object_set_int_member (object, "enc_version", commit->enc_version); // If pwd_hash is set, the magic field is no longer included in the commit of the newly created repo. 
if (commit->enc_version >= 1 && !commit->pwd_hash) json_object_set_string_member (object, "magic", commit->magic); if (commit->enc_version >= 2) json_object_set_string_member (object, "key", commit->random_key); if (commit->enc_version >= 3) json_object_set_string_member (object, "salt", commit->salt); if (commit->pwd_hash) { json_object_set_string_member (object, "pwd_hash", commit->pwd_hash); json_object_set_string_member (object, "pwd_hash_algo", commit->pwd_hash_algo); json_object_set_string_member (object, "pwd_hash_params", commit->pwd_hash_params); } } if (commit->no_local_history) json_object_set_int_member (object, "no_local_history", 1); if (commit->version != 0) json_object_set_int_member (object, "version", commit->version); if (commit->conflict) json_object_set_int_member (object, "conflict", 1); if (commit->new_merge) json_object_set_int_member (object, "new_merge", 1); if (commit->repaired) json_object_set_int_member (object, "repaired", 1); return object; } static SeafCommit * commit_from_json_object (const char *commit_id, json_t *object) { SeafCommit *commit = NULL; const char *root_id; const char *repo_id; const char *creator_name = NULL; const char *creator; const char *desc; gint64 ctime; const char *parent_id, *second_parent_id; const char *repo_name; const char *repo_desc; const char *repo_category; const char *device_name; const char *client_version; const char *encrypted = NULL; int enc_version = 0; const char *magic = NULL; const char *random_key = NULL; const char *salt = NULL; const char *pwd_hash = NULL; const char *pwd_hash_algo = NULL; const char *pwd_hash_params = NULL; int no_local_history = 0; int version = 0; int conflict = 0, new_merge = 0; int repaired = 0; root_id = json_object_get_string_member (object, "root_id"); repo_id = json_object_get_string_member (object, "repo_id"); if (json_object_has_member (object, "creator_name")) creator_name = json_object_get_string_or_null_member (object, "creator_name"); creator = 
json_object_get_string_member (object, "creator"); desc = json_object_get_string_member (object, "description"); if (!desc) desc = ""; ctime = (guint64) json_object_get_int_member (object, "ctime"); parent_id = json_object_get_string_or_null_member (object, "parent_id"); second_parent_id = json_object_get_string_or_null_member (object, "second_parent_id"); repo_name = json_object_get_string_member (object, "repo_name"); if (!repo_name) repo_name = ""; repo_desc = json_object_get_string_member (object, "repo_desc"); if (!repo_desc) repo_desc = ""; repo_category = json_object_get_string_or_null_member (object, "repo_category"); device_name = json_object_get_string_or_null_member (object, "device_name"); client_version = json_object_get_string_or_null_member (object, "client_version"); if (json_object_has_member (object, "encrypted")) encrypted = json_object_get_string_or_null_member (object, "encrypted"); if (encrypted && strcmp(encrypted, "true") == 0 && json_object_has_member (object, "enc_version")) { enc_version = json_object_get_int_member (object, "enc_version"); magic = json_object_get_string_member (object, "magic"); pwd_hash = json_object_get_string_member (object, "pwd_hash"); pwd_hash_algo = json_object_get_string_member (object, "pwd_hash_algo"); pwd_hash_params = json_object_get_string_member (object, "pwd_hash_params"); } if (enc_version >= 2) random_key = json_object_get_string_member (object, "key"); if (enc_version >= 3) salt = json_object_get_string_member (object, "salt"); if (json_object_has_member (object, "no_local_history")) no_local_history = json_object_get_int_member (object, "no_local_history"); if (json_object_has_member (object, "version")) version = json_object_get_int_member (object, "version"); if (json_object_has_member (object, "new_merge")) new_merge = json_object_get_int_member (object, "new_merge"); if (json_object_has_member (object, "conflict")) conflict = json_object_get_int_member (object, "conflict"); if 
(json_object_has_member (object, "repaired")) repaired = json_object_get_int_member (object, "repaired"); /* sanity check for incoming values. */ if (!repo_id || !is_uuid_valid(repo_id) || !root_id || !is_object_id_valid(root_id) || !creator || strlen(creator) != 40 || (parent_id && !is_object_id_valid(parent_id)) || (second_parent_id && !is_object_id_valid(second_parent_id))) return commit; // If pwd_hash is set, the magic field is no longer included in the commit of the newly created repo. if (!magic) magic = pwd_hash; switch (enc_version) { case 0: break; case 1: if (!magic || strlen(magic) != 32) return NULL; break; case 2: if (!magic || strlen(magic) != 64) return NULL; if (!random_key || strlen(random_key) != 96) return NULL; break; case 3: if (!magic || strlen(magic) != 64) return NULL; if (!random_key || strlen(random_key) != 96) return NULL; if (!salt || strlen(salt) != 64) return NULL; break; case 4: if (!magic || strlen(magic) != 64) return NULL; if (!random_key || strlen(random_key) != 96) return NULL; if (!salt || strlen(salt) != 64) return NULL; break; default: seaf_warning ("Unknown encryption version %d.\n", enc_version); return NULL; } char *creator_name_l = creator_name ? g_ascii_strdown (creator_name, -1) : NULL; commit = seaf_commit_new (commit_id, repo_id, root_id, creator_name_l, creator, desc, ctime); g_free (creator_name_l); commit->parent_id = parent_id ? g_strdup(parent_id) : NULL; commit->second_parent_id = second_parent_id ? 
g_strdup(second_parent_id) : NULL; commit->repo_name = g_strdup(repo_name); commit->repo_desc = g_strdup(repo_desc); if (encrypted && strcmp(encrypted, "true") == 0) commit->encrypted = TRUE; else commit->encrypted = FALSE; if (repo_category) commit->repo_category = g_strdup(repo_category); commit->device_name = g_strdup(device_name); commit->client_version = g_strdup(client_version); if (commit->encrypted) { commit->enc_version = enc_version; if (enc_version >= 1 && !pwd_hash) commit->magic = g_strdup(magic); if (enc_version >= 2) commit->random_key = g_strdup (random_key); if (enc_version >= 3) commit->salt = g_strdup(salt); if (pwd_hash) { commit->pwd_hash = g_strdup (pwd_hash); commit->pwd_hash_algo = g_strdup (pwd_hash_algo); commit->pwd_hash_params = g_strdup (pwd_hash_params); } } if (no_local_history) commit->no_local_history = TRUE; commit->version = version; if (new_merge) commit->new_merge = TRUE; if (conflict) commit->conflict = TRUE; if (repaired) commit->repaired = TRUE; return commit; } static SeafCommit * load_commit (SeafCommitManager *mgr, const char *repo_id, int version, const char *commit_id) { char *data = NULL; int len; SeafCommit *commit = NULL; json_t *object = NULL; json_error_t jerror; if (!commit_id || strlen(commit_id) != 40) return NULL; if (seaf_obj_store_read_obj (mgr->obj_store, repo_id, version, commit_id, (void **)&data, &len) < 0) return NULL; object = json_loadb (data, len, 0, &jerror); if (!object) { /* Perhaps the commit object contains invalid UTF-8 character. 
*/ if (data[len-1] == 0) clean_utf8_data (data, len - 1); else clean_utf8_data (data, len); object = json_loadb (data, len, 0, &jerror); if (!object) { if (jerror.text) seaf_warning ("Failed to load commit json object: %s.\n", jerror.text); else seaf_warning ("Failed to load commit json object.\n"); goto out; } } commit = commit_from_json_object (commit_id, object); if (commit) commit->manager = mgr; out: if (object) json_decref (object); g_free (data); return commit; } static int save_commit (SeafCommitManager *manager, const char *repo_id, int version, SeafCommit *commit) { json_t *object = NULL; char *data; gsize len; if (seaf_obj_store_obj_exists (manager->obj_store, repo_id, version, commit->commit_id)) return 0; object = commit_to_json_object (commit); data = json_dumps (object, 0); len = strlen (data); json_decref (object); #ifdef SEAFILE_SERVER if (seaf_obj_store_write_obj (manager->obj_store, repo_id, version, commit->commit_id, data, (int)len, TRUE) < 0) { g_free (data); return -1; } #else if (seaf_obj_store_write_obj (manager->obj_store, repo_id, version, commit->commit_id, data, (int)len, FALSE) < 0) { g_free (data); return -1; } #endif free (data); return 0; } static void delete_commit (SeafCommitManager *mgr, const char *repo_id, int version, const char *id) { seaf_obj_store_delete_obj (mgr->obj_store, repo_id, version, id); } int seaf_commit_manager_remove_store (SeafCommitManager *mgr, const char *store_id) { return seaf_obj_store_remove_store (mgr->obj_store, store_id); } ================================================ FILE: common/commit-mgr.h ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef SEAF_COMMIT_MGR_H #define SEAF_COMMIT_MGR_H struct _SeafCommitManager; typedef struct _SeafCommit SeafCommit; #include #include "db.h" #include "obj-store.h" struct _SeafCommit { struct _SeafCommitManager *manager; int ref; char commit_id[41]; char repo_id[37]; char 
root_id[41]; /* the fs root */ char *desc; char *creator_name; char creator_id[41]; guint64 ctime; /* creation time */ char *parent_id; char *second_parent_id; char *repo_name; char *repo_desc; char *repo_category; char *device_name; char *client_version; gboolean encrypted; int enc_version; char *magic; char *random_key; char *salt; char *pwd_hash; char *pwd_hash_algo; char *pwd_hash_params; gboolean no_local_history; int version; gboolean new_merge; gboolean conflict; gboolean repaired; }; /** * @commit_id: if this is NULL, will create a new id. * @ctime: if this is 0, will use current time. * * Any new commit should be added to commit manager before used. */ SeafCommit * seaf_commit_new (const char *commit_id, const char *repo_id, const char *root_id, const char *author_name, const char *creator_id, const char *desc, guint64 ctime); char * seaf_commit_to_data (SeafCommit *commit, gsize *len); SeafCommit * seaf_commit_from_data (const char *id, char *data, gsize len); void seaf_commit_ref (SeafCommit *commit); void seaf_commit_unref (SeafCommit *commit); /* Set stop to TRUE if you want to stop traversing a branch in the history graph. Note, if currently there are multi branches, this function will be called again. So, set stop to TRUE not always stop traversing the history graph. */ typedef gboolean (*CommitTraverseFunc) (SeafCommit *commit, void *data, gboolean *stop); struct _SeafileSession; typedef struct _SeafCommitManager SeafCommitManager; typedef struct _SeafCommitManagerPriv SeafCommitManagerPriv; struct _SeafCommitManager { struct _SeafileSession *seaf; sqlite3 *db; struct SeafObjStore *obj_store; SeafCommitManagerPriv *priv; }; SeafCommitManager * seaf_commit_manager_new (struct _SeafileSession *seaf); int seaf_commit_manager_init (SeafCommitManager *mgr); /** * Add a commit to commit manager and persist it to disk. * Any new commit should be added to commit manager before used. * This function increments ref count of the commit object. * Not MT safe. 
*/ int seaf_commit_manager_add_commit (SeafCommitManager *mgr, SeafCommit *commit); /** * Delete a commit from commit manager and permanently remove it from disk. * A commit object to be deleted should have ref cournt <= 1. * Not MT safe. */ void seaf_commit_manager_del_commit (SeafCommitManager *mgr, const char *repo_id, int version, const char *id); /** * Find a commit object. * This function increments ref count of returned object. * Not MT safe. */ SeafCommit* seaf_commit_manager_get_commit (SeafCommitManager *mgr, const char *repo_id, int version, const char *id); /** * Get a commit object, with compatibility between version 0 and version 1. * It will first try to get commit with version 1 layout; if fails, will * try version 0 layout for compatibility. * This is useful for loading a repo. In that case, we don't know the version * of the repo before loading its head commit. */ SeafCommit * seaf_commit_manager_get_commit_compatible (SeafCommitManager *mgr, const char *repo_id, const char *id); /** * Traverse the commits DAG start from head in topological order. * The ordering is based on commit time. * return FALSE if some commits is missing, TRUE otherwise. */ gboolean seaf_commit_manager_traverse_commit_tree (SeafCommitManager *mgr, const char *repo_id, int version, const char *head, CommitTraverseFunc func, void *data, gboolean skip_errors); /* * The same as the above function, but stops traverse down if parent commit * doesn't exists, instead of returning error. */ gboolean seaf_commit_manager_traverse_commit_tree_truncated (SeafCommitManager *mgr, const char *repo_id, int version, const char *head, CommitTraverseFunc func, void *data, gboolean skip_errors); /** * Works the same as seaf_commit_manager_traverse_commit_tree, but stops * traversing when a total number of _limit_ commits is reached. 
If * limit <= 0, there is no limit */ gboolean seaf_commit_manager_traverse_commit_tree_with_limit (SeafCommitManager *mgr, const char *repo_id, int version, const char *head, CommitTraverseFunc func, int limit, void *data, char **next_start_commit, gboolean skip_errors); gboolean seaf_commit_manager_commit_exists (SeafCommitManager *mgr, const char *repo_id, int version, const char *id); int seaf_commit_manager_remove_store (SeafCommitManager *mgr, const char *store_id); #endif ================================================ FILE: common/common.h ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef COMMON_H #define COMMON_H #ifdef HAVE_CONFIG_H #include #endif #include #include #include /* uint32_t */ #include /* size_t */ #include #include #include #include #include #include #include #define EMPTY_SHA1 "0000000000000000000000000000000000000000" #define CURRENT_ENC_VERSION 3 #define DEFAULT_PROTO_VERSION 1 #define CURRENT_PROTO_VERSION 7 #define CURRENT_REPO_VERSION 1 /* For compatibility with the old protocol, use an UUID for signature. * Listen manager on the server will use the new block tx protocol if it * receives this signature as "token". */ #define BLOCK_PROTOCOL_SIGNATURE "529319a0-577f-4d6b-a6c3-3c20f56f290c" #define SEAF_PATH_MAX 4096 #ifndef ccnet_warning #define ccnet_warning(fmt, ...) g_warning("%s(%d): " fmt, __FILE__, __LINE__, ##__VA_ARGS__) #endif #ifndef ccnet_error #define ccnet_error(fmt, ...) g_error("%s(%d): " fmt, __FILE__, __LINE__, ##__VA_ARGS__) #endif #ifndef ccnet_message #define ccnet_message(fmt, ...) g_message("%s(%d): " fmt, __FILE__, __LINE__, ##__VA_ARGS__) #endif #ifndef ccnet_debug #define ccnet_debug(fmt, ...) 
g_debug(fmt, ##__VA_ARGS__) #endif #define DEFAULT_CONFIG_DIR "~/.ccnet" #endif ================================================ FILE: common/config-mgr.c ================================================ #include "common.h" #include "config-mgr.h" #include "seaf-db.h" #include "log.h" int seaf_cfg_manager_init (SeafCfgManager *mgr) { char *sql; int db_type = seaf_db_type(mgr->db); if (db_type == SEAF_DB_TYPE_MYSQL) sql = "CREATE TABLE IF NOT EXISTS SeafileConf (" "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, cfg_group VARCHAR(255) NOT NULL," "cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER) ENGINE=INNODB"; else sql = "CREATE TABLE IF NOT EXISTS SeafileConf (cfg_group VARCHAR(255) NOT NULL," "cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER)"; if (seaf_db_query (mgr->db, sql) < 0) return -1; return 0; } SeafCfgManager * seaf_cfg_manager_new (SeafileSession *session) { SeafCfgManager *mgr = g_new0 (SeafCfgManager, 1); if (!mgr) return NULL; mgr->config = session->config; mgr->db = session->db; return mgr; } int seaf_cfg_manager_set_config_int (SeafCfgManager *mgr, const char *group, const char *key, int value) { char value_str[256]; snprintf (value_str, sizeof(value_str), "%d", value); return seaf_cfg_manager_set_config (mgr, group, key, value_str); } int seaf_cfg_manager_set_config_int64 (SeafCfgManager *mgr, const char *group, const char *key, gint64 value) { char value_str[256]; snprintf (value_str, sizeof(value_str), "%"G_GINT64_FORMAT"", value); return seaf_cfg_manager_set_config (mgr, group, key, value_str); } int seaf_cfg_manager_set_config_string (SeafCfgManager *mgr, const char *group, const char *key, const char *value) { char value_str[256]; snprintf (value_str, sizeof(value_str), "%s", value); return seaf_cfg_manager_set_config (mgr, group, key, value_str); } int seaf_cfg_manager_set_config_boolean (SeafCfgManager *mgr, const char *group, const char *key, gboolean value) { char value_str[256]; if (value) snprintf 
(value_str, sizeof(value_str), "true");
    else
        snprintf (value_str, sizeof(value_str), "false");

    return seaf_cfg_manager_set_config (mgr, group, key, value_str);
}

/* Insert or update config item [group:key] in the SeafileConf table.
 * Returns 0 on success, -1 on any database error. */
int
seaf_cfg_manager_set_config (SeafCfgManager *mgr, const char *group, const char *key, const char *value)
{
    gboolean exists, err = FALSE;

    /* Probe first so we know whether to UPDATE or INSERT. */
    char *sql = "SELECT 1 FROM SeafileConf WHERE cfg_group=? AND cfg_key=?";
    exists = seaf_db_statement_exists(mgr->db, sql, &err,
                                      2, "string", group, "string", key);
    if (err) {
        seaf_warning ("[db error]Failed to set config [%s:%s] to db.\n", group, key);
        return -1;
    }

    if (exists)
        sql = "UPDATE SeafileConf SET value=? WHERE cfg_group=? AND cfg_key=?";
    else
        sql = "INSERT INTO SeafileConf (value, cfg_group, cfg_key, property) VALUES "
              "(?,?,?,0)";

    if (seaf_db_statement_query (mgr->db, sql, 3,
                                 "string", value, "string", group, "string", key) < 0) {
        seaf_warning ("Failed to set config [%s:%s] to db.\n", group, key);
        return -1;
    }

    return 0;
}

/* Read config [group:key] as an int.
 * Falls back to the GKeyFile config when the value is not in the database.
 * NOTE(review): returns -1 both on error and when the stored value is -1,
 * so callers cannot distinguish the two. */
int
seaf_cfg_manager_get_config_int (SeafCfgManager *mgr, const char *group, const char *key)
{
    int ret;
    char *invalid = NULL;
    char *value = seaf_cfg_manager_get_config (mgr, group, key);

    if (!value) {
        GError *err = NULL;
        ret = g_key_file_get_integer (mgr->config, group, key, &err);
        if (err) {
            ret = -1;
            g_clear_error(&err);
        }
    } else {
        ret = strtol (value, &invalid, 10);
        if (*invalid != '\0') {
            ret = -1;
            seaf_warning ("Value of config [%s:%s] is invalid: [%s]\n", group, key, value);
        }
        g_free (value);
    }

    return ret;
}

/* Read config [group:key] as a 64-bit int, with the same database-then-
 * config-file fallback and -1-on-error convention as the int variant. */
gint64
seaf_cfg_manager_get_config_int64 (SeafCfgManager *mgr, const char *group, const char *key)
{
    gint64 ret;
    char *invalid = NULL;
    char *value = seaf_cfg_manager_get_config (mgr, group, key);

    if (!value) {
        GError *err = NULL;
        ret = g_key_file_get_int64(mgr->config, group, key, &err);
        if (err) {
            ret = -1;
            g_clear_error(&err);
        }
    } else {
        ret = strtoll (value, &invalid, 10);
        if (*invalid != '\0') {
            seaf_warning ("Value of config [%s:%s] is invalid: [%s]\n", group, key, value);
            ret = -1;
        }
        g_free (value);
    }

    return ret;
}

gboolean
seaf_cfg_manager_get_config_boolean (SeafCfgManager *mgr, const char *group, const char *key) { gboolean ret; char *value = seaf_cfg_manager_get_config (mgr, group, key); if (!value) { GError *err = NULL; ret = g_key_file_get_boolean(mgr->config, group, key, &err); if (err) { seaf_warning ("Config [%s:%s] not set, default is false.\n", group, key); ret = FALSE; g_clear_error(&err); } } else { if (strcmp ("true", value) == 0) ret = TRUE; else ret = FALSE; g_free (value); } return ret; } char * seaf_cfg_manager_get_config_string (SeafCfgManager *mgr, const char *group, const char *key) { char *ret = NULL; char *value = seaf_cfg_manager_get_config (mgr, group, key); if (!value) { ret = g_key_file_get_string (mgr->config, group, key, NULL); if (ret != NULL) ret = g_strstrip(ret); } else { ret = value; } return ret; } char * seaf_cfg_manager_get_config (SeafCfgManager *mgr, const char *group, const char *key) { char *sql = "SELECT value FROM SeafileConf WHERE cfg_group=? AND cfg_key=?"; char *value = seaf_db_statement_get_string(mgr->db, sql, 2, "string", group, "string", key); if (value != NULL) value = g_strstrip(value); return value; } ================================================ FILE: common/config-mgr.h ================================================ #ifndef SEAF_CONFIG_MGR_H #define SEAF_CONFIG_MGR_H typedef struct _SeafCfgManager SeafCfgManager; #include "seafile-session.h" struct _SeafCfgManager { GKeyFile *config; SeafDB *db; }; typedef struct _SeafileSession SeafileSession; SeafCfgManager * seaf_cfg_manager_new (SeafileSession *seaf); int seaf_cfg_manager_set_config (SeafCfgManager *mgr, const char *group, const char *key, const char *value); char * seaf_cfg_manager_get_config (SeafCfgManager *mgr, const char *group, const char *key); int seaf_cfg_manager_set_config_int (SeafCfgManager *mgr, const char *group, const char *key, int value); int seaf_cfg_manager_get_config_int (SeafCfgManager *mgr, const char *group, const char *key); int 
seaf_cfg_manager_set_config_int64 (SeafCfgManager *mgr, const char *group, const char *key, gint64 value);

gint64
seaf_cfg_manager_get_config_int64 (SeafCfgManager *mgr, const char *group, const char *key);

int
seaf_cfg_manager_set_config_string (SeafCfgManager *mgr, const char *group, const char *key, const char *value);

char *
seaf_cfg_manager_get_config_string (SeafCfgManager *mgr, const char *group, const char *key);

int
seaf_cfg_manager_set_config_boolean (SeafCfgManager *mgr, const char *group, const char *key, gboolean value);

gboolean
seaf_cfg_manager_get_config_boolean (SeafCfgManager *mgr, const char *group, const char *key);

int
seaf_cfg_manager_init (SeafCfgManager *mgr);

#endif /* SEAF_CONFIG_MGR_H */

================================================
FILE: common/diff-simple.c
================================================

#include "common.h"
#include "diff-simple.h"
#include "utils.h"
#include "log.h"

/* Allocate a DiffEntry with the given change type/status, raw 20-byte sha1
 * and path (the name is duplicated; caller keeps ownership of @name). */
DiffEntry *
diff_entry_new (char type, char status, unsigned char *sha1, const char *name)
{
    DiffEntry *de = g_new0 (DiffEntry, 1);

    de->type = type;
    de->status = status;
    memcpy (de->sha1, sha1, 20);
    de->name = g_strdup(name);

    return de;
}

/* Build a DiffEntry from a SeafDirent, prefixing @basedir to the entry
 * name and converting the hex object id to raw sha1. */
DiffEntry *
diff_entry_new_from_dirent (char type, char status,
                            SeafDirent *dent, const char *basedir)
{
    DiffEntry *de = g_new0 (DiffEntry, 1);
    unsigned char sha1[20];
    char *path;

    hex_to_rawdata (dent->id, sha1, 20);
    path = g_strconcat (basedir, dent->name, NULL);

    de->type = type;
    de->status = status;
    memcpy (de->sha1, sha1, 20);
    de->name = path;
    de->size = dent->size;

#ifdef SEAFILE_CLIENT
    /* Client builds carry extra metadata for these change kinds. */
    if (type == DIFF_TYPE_COMMITS &&
        (status == DIFF_STATUS_ADDED ||
         status == DIFF_STATUS_MODIFIED ||
         status == DIFF_STATUS_DIR_ADDED ||
         status == DIFF_STATUS_DIR_DELETED)) {
        de->mtime = dent->mtime;
        de->mode = dent->mode;
        de->modifier = g_strdup(dent->modifier);
    }
#endif

    return de;
}

/* Free a DiffEntry together with its owned strings. */
void
diff_entry_free (DiffEntry *de)
{
    g_free (de->name);
    if (de->new_name)
        g_free (de->new_name);
#ifdef SEAFILE_CLIENT
    g_free (de->modifier);
#endif
    g_free (de);
}

/* Two dirents are "the same" iff object id, mode and mtime all match. */
inline static gboolean
dirent_same (SeafDirent *denta, SeafDirent *dentb)
{
    return (strcmp (dentb->id, denta->id) == 0 &&
            denta->mode == dentb->mode &&
            denta->mtime == dentb->mtime);
}

/* Invoke the file callback on the regular-file entries among @dents.
 * Index position i corresponds to tree i; n is 2 or 3. */
static int
diff_files (int n, SeafDirent *dents[], const char *basedir, DiffOptions *opt)
{
    SeafDirent *files[3];
    int i, n_files = 0;

    memset (files, 0, sizeof(files[0])*n);

    for (i = 0; i < n; ++i) {
        if (dents[i] && S_ISREG(dents[i]->mode)) {
            files[i] = dents[i];
            ++n_files;
        }
    }

    if (n_files == 0)
        return 0;

    return opt->file_cb (n, basedir, files, opt->data);
}

static int
diff_trees_recursive (int n, SeafDir *trees[],
                      const char *basedir, DiffOptions *opt);

/* Invoke the dir callback for the directory entries among @dents, then
 * (unless the callback cleared *recurse) load each subdir object and
 * recurse into the sub level. */
static int
diff_directories (int n, SeafDirent *dents[], const char *basedir, DiffOptions *opt)
{
    SeafDirent *dirs[3];
    int i, n_dirs = 0;
    char *dirname = "";
    int ret;
    SeafDir *sub_dirs[3], *dir;

    memset (dirs, 0, sizeof(dirs[0])*n);

    for (i = 0; i < n; ++i) {
        if (dents[i] && S_ISDIR(dents[i]->mode)) {
            dirs[i] = dents[i];
            ++n_dirs;
        }
    }

    if (n_dirs == 0)
        return 0;

    gboolean recurse = TRUE;
    ret = opt->dir_cb (n, basedir, dirs, opt->data, &recurse);
    if (ret < 0)
        return ret;

    if (!recurse)
        return 0;

    memset (sub_dirs, 0, sizeof(sub_dirs[0])*n);
    for (i = 0; i < n; ++i) {
        if (dents[i] != NULL && S_ISDIR(dents[i]->mode)) {
            dir = seaf_fs_manager_get_seafdir (seaf->fs_mgr,
                                               opt->store_id,
                                               opt->version,
                                               dents[i]->id);
            if (!dir) {
                seaf_warning ("Failed to find dir %s:%s.\n",
                              opt->store_id, dents[i]->id);
                ret = -1;
                goto free_sub_dirs;
            }
            sub_dirs[i] = dir;
            /* All present entries share one name at this point, so any of
             * them can supply the subdir name. */
            dirname = dents[i]->name;
        }
    }

    char *new_basedir = g_strconcat (basedir, dirname, "/", NULL);

    ret = diff_trees_recursive (n, sub_dirs, new_basedir, opt);

    g_free (new_basedir);

free_sub_dirs:
    /* seaf_dir_free tolerates the NULL slots left by memset. */
    for (i = 0; i < n; ++i)
        seaf_dir_free (sub_dirs[i]);
    return ret;
}

/* Walk up to three trees in lock-step over their sorted dirent lists,
 * grouping entries by name and dispatching to diff_files/diff_directories. */
static int
diff_trees_recursive (int n, SeafDir *trees[],
                      const char *basedir, DiffOptions *opt)
{
    GList *ptrs[3];
    SeafDirent *dents[3];
    int i;
    SeafDirent *dent;
    char *first_name;
    gboolean done;
    int ret = 0;

    for (i = 0; i < n; ++i) {
        if
        (trees[i])
            ptrs[i] = trees[i]->entries;
        else
            ptrs[i] = NULL;
    }

    while (1) {
        first_name = NULL;
        memset (dents, 0, sizeof(dents[0])*n);
        done = TRUE;

        /* Find the "largest" name, assuming dirents are sorted. */
        for (i = 0; i < n; ++i) {
            if (ptrs[i] != NULL) {
                done = FALSE;
                dent = ptrs[i]->data;
                if (!first_name)
                    first_name = dent->name;
                else if (strcmp(dent->name, first_name) > 0)
                    first_name = dent->name;
            }
        }
        /* All three lists exhausted. */
        if (done)
            break;

        /*
         * Setup dir entries for all names that equal to first_name
         */
        for (i = 0; i < n; ++i) {
            if (ptrs[i] != NULL) {
                dent = ptrs[i]->data;
                if (strcmp(first_name, dent->name) == 0) {
                    dents[i] = dent;
                    ptrs[i] = ptrs[i]->next;
                }
            }
        }

        /* Fast path: identical entries on every side need no diffing. */
        if (n == 2 && dents[0] && dents[1] &&
            dirent_same(dents[0], dents[1]))
            continue;

        if (n == 3 && dents[0] && dents[1] && dents[2] &&
            dirent_same(dents[0], dents[1]) && dirent_same(dents[0], dents[2]))
            continue;

        /* Diff files of this level. */
        ret = diff_files (n, dents, basedir, opt);
        if (ret < 0)
            return ret;

        /* Recurse into sub level. */
        ret = diff_directories (n, dents, basedir, opt);
        if (ret < 0)
            return ret;
    }

    return ret;
}

/* Public entry point: load the @n root dir objects (n must be 2 or 3)
 * and diff them recursively from the repo root (""). */
int
diff_trees (int n, const char *roots[], DiffOptions *opt)
{
    SeafDir **trees, *root;
    int i, ret;

    g_return_val_if_fail (n == 2 || n == 3, -1);

    trees = g_new0 (SeafDir *, n);
    for (i = 0; i < n; ++i) {
        root = seaf_fs_manager_get_seafdir (seaf->fs_mgr,
                                            opt->store_id,
                                            opt->version,
                                            roots[i]);
        if (!root) {
            seaf_warning ("Failed to find dir %s:%s.\n",
                          opt->store_id, roots[i]);
            g_free (trees);
            return -1;
        }
        trees[i] = root;
    }

    ret = diff_trees_recursive (n, trees, "", opt);

    for (i = 0; i < n; ++i)
        seaf_dir_free (trees[i]);
    g_free (trees);

    return ret;
}

/* Shared callback payload: output list plus the fold-dir option. */
typedef struct DiffData {
    GList **results;
    gboolean fold_dir_diff;
} DiffData;

/* Two-way file callback: classify add/delete/modify between tree1 (old)
 * and tree2 (new), prepending DiffEntry results. */
static int
twoway_diff_files (int n, const char *basedir, SeafDirent *files[], void *vdata)
{
    DiffData *data = vdata;
    GList **results = data->results;
    DiffEntry *de;
    SeafDirent *tree1 = files[0];
    SeafDirent *tree2 = files[1];

    if (!tree1) {
        de = diff_entry_new_from_dirent
        (DIFF_TYPE_COMMITS, DIFF_STATUS_ADDED, tree2, basedir);
        *results = g_list_prepend (*results, de);
        return 0;
    }

    if (!tree2) {
        de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS, DIFF_STATUS_DELETED,
                                         tree1, basedir);
        *results = g_list_prepend (*results, de);
        return 0;
    }

    if (!dirent_same (tree1, tree2)) {
        de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS, DIFF_STATUS_MODIFIED,
                                         tree2, basedir);
        /* Record the pre-change size for modified entries. */
        de->origin_size = tree1->size;
        *results = g_list_prepend (*results, de);
    }

    return 0;
}

/* Two-way dir callback: emit DIR_ADDED/DIR_DELETED entries. *recurse tells
 * the walker whether to descend; folding suppresses descent so only the
 * top-level directory appears in the results. */
static int
twoway_diff_dirs (int n, const char *basedir, SeafDirent *dirs[], void *vdata,
                  gboolean *recurse)
{
    DiffData *data = vdata;
    GList **results = data->results;
    DiffEntry *de;
    SeafDirent *tree1 = dirs[0];
    SeafDirent *tree2 = dirs[1];

    if (!tree1) {
        if (strcmp (tree2->id, EMPTY_SHA1) == 0 || data->fold_dir_diff) {
            de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS,
                                             DIFF_STATUS_DIR_ADDED,
                                             tree2, basedir);
            *results = g_list_prepend (*results, de);
            *recurse = FALSE;
        } else
            *recurse = TRUE;
        return 0;
    }

    if (!tree2) {
        de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS,
                                         DIFF_STATUS_DIR_DELETED,
                                         tree1, basedir);
        *results = g_list_prepend (*results, de);
        if (data->fold_dir_diff) {
            *recurse = FALSE;
        } else
            *recurse = TRUE;
        return 0;
    }

    /* Both sides present: nothing to report, caller's default recurse
     * value (TRUE) stands. */
    return 0;
}

/* Diff two commits of the same repo; DiffEntry results are prepended to
 * *results and renames are resolved afterwards. */
int
diff_commits (SeafCommit *commit1, SeafCommit *commit2, GList **results,
              gboolean fold_dir_diff)
{
    SeafRepo *repo = NULL;
    DiffOptions opt;
    const char *roots[2];

    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, commit1->repo_id);
    if (!repo) {
        seaf_warning ("Failed to get repo %s.\n", commit1->repo_id);
        return -1;
    }

    DiffData data;
    memset (&data, 0, sizeof(data));
    data.results = results;
    data.fold_dir_diff = fold_dir_diff;

    memset (&opt, 0, sizeof(opt));
#ifdef SEAFILE_SERVER
    /* On the server virtual repos store objects under store_id. */
    memcpy (opt.store_id, repo->store_id, 36);
#else
    memcpy (opt.store_id, repo->id, 36);
#endif
    opt.version = repo->version;
    opt.file_cb = twoway_diff_files;
    opt.dir_cb = twoway_diff_dirs;
    opt.data = &data;

#ifdef SEAFILE_SERVER
    seaf_repo_unref (repo);
#endif

    roots[0] =
    commit1->root_id;
    roots[1] = commit2->root_id;

    diff_trees (2, roots, &opt);
    diff_resolve_renames (results);

    return 0;
}

/* Same as diff_commits but takes the two root fs object ids directly. */
int
diff_commit_roots (const char *store_id, int version,
                   const char *root1, const char *root2, GList **results,
                   gboolean fold_dir_diff)
{
    DiffOptions opt;
    const char *roots[2];
    DiffData data;

    memset (&data, 0, sizeof(data));
    data.results = results;
    data.fold_dir_diff = fold_dir_diff;

    memset (&opt, 0, sizeof(opt));
    memcpy (opt.store_id, store_id, 36);
    opt.version = version;
    opt.file_cb = twoway_diff_files;
    opt.dir_cb = twoway_diff_dirs;
    opt.data = &data;

    roots[0] = root1;
    roots[1] = root2;

    diff_trees (2, roots, &opt);
    diff_resolve_renames (results);

    return 0;
}

/* Three-way file callback for merge commits:
 * m = merged result, p1/p2 = the two parents. */
static int
threeway_diff_files (int n, const char *basedir, SeafDirent *files[], void *vdata)
{
    DiffData *data = vdata;
    SeafDirent *m = files[0];
    SeafDirent *p1 = files[1];
    SeafDirent *p2 = files[2];
    GList **results = data->results;
    DiffEntry *de;

    /* diff m with both p1 and p2. */
    if (m && p1 && p2) {
        if (!dirent_same(m, p1) && !dirent_same (m, p2)) {
            de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS,
                                             DIFF_STATUS_MODIFIED,
                                             m, basedir);
            *results = g_list_prepend (*results, de);
        }
    } else if (!m && p1 && p2) {
        de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS,
                                         DIFF_STATUS_DELETED,
                                         p1, basedir);
        *results = g_list_prepend (*results, de);
    } else if (m && !p1 && p2) {
        if (!dirent_same (m, p2)) {
            de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS,
                                             DIFF_STATUS_MODIFIED,
                                             m, basedir);
            *results = g_list_prepend (*results, de);
        }
    } else if (m && p1 && !p2) {
        if (!dirent_same (m, p1)) {
            de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS,
                                             DIFF_STATUS_MODIFIED,
                                             m, basedir);
            *results = g_list_prepend (*results, de);
        }
    } else if (m && !p1 && !p2) {
        de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS,
                                         DIFF_STATUS_ADDED,
                                         m, basedir);
        *results = g_list_prepend (*results, de);
    }

    /* Nothing to do for:
     * 1. !m && p1 && !p2;
     * 2. !m && !p1 && p2;
     * 3.
     !m && !p1 && !p2 (should not happen)
     */

    return 0;
}

/* Three-way dir callback: always descend; all entries are produced by
 * the file callback. */
static int
threeway_diff_dirs (int n, const char *basedir, SeafDirent *dirs[], void *vdata,
                    gboolean *recurse)
{
    *recurse = TRUE;
    return 0;
}

/* Diff a merge commit against both of its parents (three-way).
 * @merge must have two parents; *results must start empty. */
int
diff_merge (SeafCommit *merge, GList **results, gboolean fold_dir_diff)
{
    SeafRepo *repo = NULL;
    DiffOptions opt;
    const char *roots[3];
    SeafCommit *parent1, *parent2;

    g_return_val_if_fail (*results == NULL, -1);
    g_return_val_if_fail (merge->parent_id != NULL &&
                          merge->second_parent_id != NULL,
                          -1);

    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, merge->repo_id);
    if (!repo) {
        seaf_warning ("Failed to get repo %s.\n", merge->repo_id);
        return -1;
    }

    parent1 = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                              repo->id, repo->version,
                                              merge->parent_id);
    if (!parent1) {
        seaf_warning ("failed to find commit %s:%s.\n",
                      repo->id, merge->parent_id);
        return -1;
    }

    parent2 = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                              repo->id, repo->version,
                                              merge->second_parent_id);
    if (!parent2) {
        seaf_warning ("failed to find commit %s:%s.\n",
                      repo->id, merge->second_parent_id);
        seaf_commit_unref (parent1);
        return -1;
    }

    DiffData data;
    memset (&data, 0, sizeof(data));
    data.results = results;
    data.fold_dir_diff = fold_dir_diff;

    memset (&opt, 0, sizeof(opt));
#ifdef SEAFILE_SERVER
    memcpy (opt.store_id, repo->store_id, 36);
#else
    memcpy (opt.store_id, repo->id, 36);
#endif
    opt.version = repo->version;
    opt.file_cb = threeway_diff_files;
    opt.dir_cb = threeway_diff_dirs;
    opt.data = &data;

#ifdef SEAFILE_SERVER
    seaf_repo_unref (repo);
#endif

    /* Order matters: index 0 is the merged tree, 1 and 2 the parents. */
    roots[0] = merge->root_id;
    roots[1] = parent1->root_id;
    roots[2] = parent2->root_id;

    int ret = diff_trees (3, roots, &opt);
    diff_resolve_renames (results);

    seaf_commit_unref (parent1);
    seaf_commit_unref (parent2);

    return ret;
}

/* Three-way diff by root object ids (merged root vs two parent roots). */
int
diff_merge_roots (const char *store_id, int version,
                  const char *merged_root, const char *p1_root, const char *p2_root,
                  GList **results, gboolean fold_dir_diff)
{
    DiffOptions opt;
    const char *roots[3];

    g_return_val_if_fail
    (*results == NULL, -1);

    DiffData data;
    memset (&data, 0, sizeof(data));
    data.results = results;
    data.fold_dir_diff = fold_dir_diff;

    memset (&opt, 0, sizeof(opt));
    memcpy (opt.store_id, store_id, 36);
    opt.version = version;
    opt.file_cb = threeway_diff_files;
    opt.dir_cb = threeway_diff_dirs;
    opt.data = &data;

    roots[0] = merged_root;
    roots[1] = p1_root;
    roots[2] = p2_root;

    diff_trees (3, roots, &opt);
    diff_resolve_renames (results);

    return 0;
}

/* This function only resolve "strict" rename, i.e. two files must be
 * exactly the same.
 * Don't detect rename of empty files and empty dirs.
 */
void
diff_resolve_renames (GList **diff_entries)
{
    GHashTable *deleted_files = NULL, *deleted_dirs = NULL;
    GList *p;
    GList *added = NULL;
    DiffEntry *de;
    unsigned char empty_sha1[20];
    unsigned int deleted_empty_count = 0, deleted_empty_dir_count = 0;
    unsigned int added_empty_count = 0, added_empty_dir_count = 0;
    gboolean check_empty_dir, check_empty_file;

    memset (empty_sha1, 0, 20);

    /* Hash and equal functions for raw sha1. */
    deleted_dirs = g_hash_table_new (ccnet_sha1_hash, ccnet_sha1_equal);
    deleted_files = g_hash_table_new (ccnet_sha1_hash, ccnet_sha1_equal);

    /* Count deleted and added entries of which content is empty. */
    for (p = *diff_entries; p != NULL; p = p->next) {
        de = p->data;
        if (memcmp (de->sha1, empty_sha1, 20) == 0) {
            if (de->status == DIFF_STATUS_DELETED)
                deleted_empty_count++;
            if (de->status == DIFF_STATUS_DIR_DELETED)
                deleted_empty_dir_count++;
            if (de->status == DIFF_STATUS_ADDED)
                added_empty_count++;
            if (de->status == DIFF_STATUS_DIR_ADDED)
                added_empty_dir_count++;
        }
    }

    /* Empty-content entries are only paired when exactly one delete and
     * one add exist; otherwise the pairing would be ambiguous. */
    check_empty_dir = (deleted_empty_dir_count == 1 &&
                       added_empty_dir_count == 1);
    check_empty_file = (deleted_empty_count == 1 &&
                        added_empty_count == 1);

    /* Collect all "deleted" entries.
     * The hash maps sha1 -> the list link holding the entry. */
    for (p = *diff_entries; p != NULL; p = p->next) {
        de = p->data;
        if (de->status == DIFF_STATUS_DELETED) {
            if (memcmp (de->sha1, empty_sha1, 20) == 0 &&
                check_empty_file == FALSE)
                continue;
            g_hash_table_insert (deleted_files, de->sha1, p);
        }

        if (de->status == DIFF_STATUS_DIR_DELETED) {
            if (memcmp (de->sha1, empty_sha1, 20) == 0 &&
                check_empty_dir == FALSE)
                continue;
            g_hash_table_insert (deleted_dirs, de->sha1, p);
        }
    }

    /* Collect all "added" entries into a separate list. */
    for (p = *diff_entries; p != NULL; p = p->next) {
        de = p->data;
        if (de->status == DIFF_STATUS_ADDED) {
            if (memcmp (de->sha1, empty_sha1, 20) == 0 &&
                check_empty_file == 0)
                continue;
            added = g_list_prepend (added, p);
        }

        if (de->status == DIFF_STATUS_DIR_ADDED) {
            if (memcmp (de->sha1, empty_sha1, 20) == 0 &&
                check_empty_dir == 0)
                continue;
            added = g_list_prepend (added, p);
        }
    }

    /* For each "added" entry, if we find a "deleted" entry with
     * the same content, we find a rename pair.
     */
    p = added;
    while (p != NULL) {
        GList *p_add, *p_del;
        DiffEntry *de_add, *de_del, *de_rename;
        int rename_status;

        p_add = p->data;
        de_add = p_add->data;

        if (de_add->status == DIFF_STATUS_ADDED)
            p_del = g_hash_table_lookup (deleted_files, de_add->sha1);
        else
            p_del = g_hash_table_lookup (deleted_dirs, de_add->sha1);

        if (p_del) {
            de_del = p_del->data;

            if (de_add->status == DIFF_STATUS_DIR_ADDED)
                rename_status = DIFF_STATUS_DIR_RENAMED;
            else
                rename_status = DIFF_STATUS_RENAMED;

            /* Replace the add/delete pair with a single rename entry. */
            de_rename = diff_entry_new (de_del->type, rename_status,
                                        de_del->sha1, de_del->name);
            de_rename->new_name = g_strdup(de_add->name);

            *diff_entries = g_list_delete_link (*diff_entries, p_add);
            *diff_entries = g_list_delete_link (*diff_entries, p_del);
            *diff_entries = g_list_prepend (*diff_entries, de_rename);

            if (de_del->status == DIFF_STATUS_DIR_DELETED)
                g_hash_table_remove (deleted_dirs, de_add->sha1);
            else
                g_hash_table_remove (deleted_files, de_add->sha1);

            diff_entry_free (de_add);
            diff_entry_free (de_del);
        }

        p = g_list_delete_link (p, p);
    }

    g_hash_table_destroy (deleted_dirs);
    g_hash_table_destroy (deleted_files);
}

/* True when @de_file's path lies under @de_dir's path and the two carry
 * opposite add/delete statuses, i.e. the dir entry is redundant. */
static gboolean
is_redundant_empty_dir (DiffEntry *de_dir, DiffEntry *de_file)
{
    int dir_len;

    if (de_dir->status == DIFF_STATUS_DIR_ADDED &&
        de_file->status == DIFF_STATUS_DELETED) {
        dir_len = strlen (de_dir->name);
        if (strlen (de_file->name) > dir_len &&
            strncmp (de_dir->name, de_file->name, dir_len) == 0)
            return TRUE;
    }

    if (de_dir->status == DIFF_STATUS_DIR_DELETED &&
        de_file->status == DIFF_STATUS_ADDED) {
        dir_len = strlen (de_dir->name);
        if (strlen (de_file->name) > dir_len &&
            strncmp (de_dir->name, de_file->name, dir_len) == 0)
            return TRUE;
    }

    return FALSE;
}

/*
 * An empty dir entry may be added by deleting all the files under it.
 * Similarly, an empty dir entry may be deleted by adding some file in it.
 * In both cases, we don't want to include the empty dir entry in the
 * diff results.
 */
void
diff_resolve_empty_dirs (GList **diff_entries)
{
    GList *empty_dirs = NULL;
    GList *p, *dir, *file;
    DiffEntry *de, *de_dir, *de_file;

    for (p = *diff_entries; p != NULL; p = p->next) {
        de = p->data;
        if (de->status == DIFF_STATUS_DIR_ADDED ||
            de->status == DIFF_STATUS_DIR_DELETED)
            empty_dirs = g_list_prepend (empty_dirs, p);
    }

    for (dir = empty_dirs; dir != NULL; dir = dir->next) {
        de_dir = ((GList *)dir->data)->data;
        for (file = *diff_entries; file != NULL; file = file->next) {
            de_file = file->data;
            if (is_redundant_empty_dir (de_dir, de_file)) {
                *diff_entries = g_list_delete_link (*diff_entries, dir->data);
                break;
            }
        }
    }

    g_list_free (empty_dirs);
}

/* Map the (right-shifted) unmerged bitmask onto a STATUS_UNMERGED_*
 * constant; 0 and a warning for unexpected masks. */
int
diff_unmerged_state(int mask)
{
    mask >>= 1;
    switch (mask) {
    case 7:
        return STATUS_UNMERGED_BOTH_CHANGED;
    case 3:
        return STATUS_UNMERGED_OTHERS_REMOVED;
    case 5:
        return STATUS_UNMERGED_I_REMOVED;
    case 6:
        return STATUS_UNMERGED_BOTH_ADDED;
    case 2:
        return STATUS_UNMERGED_DFC_I_ADDED_FILE;
    case 4:
        return STATUS_UNMERGED_DFC_OTHERS_ADDED_FILE;
    default:
        seaf_warning ("Unexpected unmerged case\n");
    }
    return 0;
}

/* Render diff results as one status line per entry; caller frees. */
char *
format_diff_results(GList *results)
{
    GList *ptr;
    GString *fmt_status;
    DiffEntry *de;

    fmt_status = g_string_new("");

    for (ptr = results; ptr; ptr = ptr->next) {
        de = ptr->data;
        if (de->status != DIFF_STATUS_RENAMED)
            g_string_append_printf(fmt_status, "%c %c %d %u %s\n",
                                   de->type, de->status, de->unmerge_state,
                                   (int)strlen(de->name), de->name);
        else
            /* Renames carry both old and new name. */
            g_string_append_printf(fmt_status, "%c %c %d %u %s %u %s\n",
                                   de->type, de->status, de->unmerge_state,
                                   (int)strlen(de->name), de->name,
                                   (int)strlen(de->new_name), de->new_name);
    }

    return g_string_free(fmt_status, FALSE);
}

/* Return the path component after the last '/', or the whole path. */
inline static char *
get_basename (char *path)
{
    char *slash;
    slash = strrchr (path, '/');
    if (!slash)
        return path;
    return (slash + 1);
}

/* Build a human-readable change summary from diff results, naming the
 * first entry in each category and counting the rest. NULL when empty. */
char *
diff_results_to_description (GList *results)
{
    GList *p;
    DiffEntry *de;
    char *add_mod_file = NULL, *removed_file = NULL;
    char *renamed_file = NULL, *renamed_dir = NULL;
    char *new_dir = NULL, *removed_dir = NULL;
    int n_add_mod = 0, n_removed = 0, n_renamed = 0;
    int n_new_dir = 0, n_removed_dir = 0, n_renamed_dir = 0;
    GString *desc;

    if (results == NULL)
        return NULL;

    for (p = results; p != NULL; p = p->next) {
        de = p->data;
        switch (de->status) {
        case DIFF_STATUS_ADDED:
            if (n_add_mod == 0)
                add_mod_file = get_basename(de->name);
            n_add_mod++;
            break;
        case DIFF_STATUS_DELETED:
            if (n_removed == 0)
                removed_file = get_basename(de->name);
            n_removed++;
            break;
        case DIFF_STATUS_RENAMED:
            if (n_renamed == 0)
                renamed_file = get_basename(de->name);
            n_renamed++;
            break;
        case DIFF_STATUS_MODIFIED:
            if (n_add_mod == 0)
                add_mod_file = get_basename(de->name);
            n_add_mod++;
            break;
        case DIFF_STATUS_DIR_ADDED:
            if (n_new_dir == 0)
                new_dir = get_basename(de->name);
            n_new_dir++;
            break;
        case DIFF_STATUS_DIR_DELETED:
            if (n_removed_dir == 0)
                removed_dir = get_basename(de->name);
            n_removed_dir++;
            break;
        case DIFF_STATUS_DIR_RENAMED:
            if (n_renamed_dir == 0)
                renamed_dir = get_basename(de->name);
            n_renamed_dir++;
            break;
        }
    }

    desc = g_string_new ("");

    if (n_add_mod == 1)
        g_string_append_printf (desc, "Added or modified \"%s\".\n",
                                add_mod_file);
    else if (n_add_mod > 1)
        g_string_append_printf (desc, "Added or modified \"%s\" and %d more files.\n",
                                add_mod_file, n_add_mod - 1);

    if (n_removed == 1)
        g_string_append_printf (desc, "Deleted \"%s\".\n",
                                removed_file);
    else if (n_removed > 1)
        g_string_append_printf (desc, "Deleted \"%s\" and %d more files.\n",
                                removed_file, n_removed - 1);

    if (n_renamed == 1)
        g_string_append_printf (desc, "Renamed \"%s\".\n",
                                renamed_file);
    else if (n_renamed > 1)
        g_string_append_printf (desc, "Renamed \"%s\" and %d more files.\n",
                                renamed_file, n_renamed - 1);

    if (n_new_dir == 1)
        g_string_append_printf (desc, "Added directory \"%s\".\n",
                                new_dir);
    else if (n_new_dir > 1)
        g_string_append_printf (desc, "Added \"%s\" and %d more directories.\n",
                                new_dir, n_new_dir - 1);

    if (n_removed_dir == 1)
        g_string_append_printf (desc, "Removed directory \"%s\".\n",
                                removed_dir);
    else if (n_removed_dir > 1)
        g_string_append_printf (desc, "Removed \"%s\" and %d more directories.\n",
                                removed_dir, n_removed_dir - 1);

    if (n_renamed_dir == 1)
        g_string_append_printf (desc, "Renamed directory \"%s\".\n",
                                renamed_dir);
    else if (n_renamed_dir > 1)
        g_string_append_printf (desc, "Renamed \"%s\" and %d more directories.\n",
                                renamed_dir, n_renamed_dir - 1);

    return g_string_free (desc, FALSE);
}

================================================
FILE: common/diff-simple.h
================================================

#ifndef DIFF_SIMPLE_H
#define DIFF_SIMPLE_H

/* NOTE(review): extraction artifact — the angle-bracket header name was
 * lost from the following include during text conversion. */
#include
#include "seafile-session.h"

#define DIFF_TYPE_WORKTREE 'W' /* diff from index to worktree */
#define DIFF_TYPE_INDEX 'I' /* diff from commit to index */
#define DIFF_TYPE_COMMITS 'C' /* diff between two commits*/

#define DIFF_STATUS_ADDED 'A'
#define DIFF_STATUS_DELETED 'D'
#define DIFF_STATUS_MODIFIED 'M'
#define DIFF_STATUS_RENAMED 'R'
#define DIFF_STATUS_UNMERGED 'U'
#define DIFF_STATUS_DIR_ADDED 'B'
#define DIFF_STATUS_DIR_DELETED 'C'
#define DIFF_STATUS_DIR_RENAMED 'E'

enum {
    STATUS_UNMERGED_NONE,
    /* I and others modified
     the same file differently. */
    STATUS_UNMERGED_BOTH_CHANGED,
    /* I and others created the same file with different contents. */
    STATUS_UNMERGED_BOTH_ADDED,
    /* I removed a file while others modified it. */
    STATUS_UNMERGED_I_REMOVED,
    /* Others removed a file while I modified it. */
    STATUS_UNMERGED_OTHERS_REMOVED,
    /* I replace a directory with a file while others modified files under
       the directory. */
    STATUS_UNMERGED_DFC_I_ADDED_FILE,
    /* Others replace a directory with a file while I modified files under
       the directory. */
    STATUS_UNMERGED_DFC_OTHERS_ADDED_FILE,
};

/* One change record produced by the diff walkers. */
typedef struct DiffEntry {
    char type;
    char status;
    int unmerge_state;
    unsigned char sha1[20];  /* used for resolve rename */
    char *name;
    char *new_name;          /* only used in rename. */
    gint64 size;
    gint64 origin_size;      /* only used in modified */
} DiffEntry;

DiffEntry *
diff_entry_new (char type, char status, unsigned char *sha1, const char *name);

void
diff_entry_free (DiffEntry *de);

/*
 * @fold_dir_diff: if TRUE, only the top level directory will be included
 * in the diff result if a directory with files is added or removed.
 * Otherwise all the files in the direcotory will be recursively
 * included in the diff result.
 */
int
diff_commits (SeafCommit *commit1, SeafCommit *commit2, GList **results,
              gboolean fold_dir_diff);

int
diff_commit_roots (const char *store_id, int version,
                   const char *root1, const char *root2, GList **results,
                   gboolean fold_dir_diff);

int
diff_merge (SeafCommit *merge, GList **results, gboolean fold_dir_diff);

int
diff_merge_roots (const char *store_id, int version,
                  const char *merged_root, const char *p1_root, const char *p2_root,
                  GList **results, gboolean fold_dir_diff);

void
diff_resolve_renames (GList **diff_entries);

void
diff_resolve_empty_dirs (GList **diff_entries);

int
diff_unmerged_state(int mask);

char *
format_diff_results(GList *results);

char *
diff_results_to_description (GList *results);

/* Per-name callbacks used by diff_trees; index i of the arrays
 * corresponds to tree i. */
typedef int (*DiffFileCB) (int n,
                           const char *basedir,
                           SeafDirent *files[],
                           void *data);

typedef int (*DiffDirCB) (int n,
                          const char *basedir,
                          SeafDirent *dirs[],
                          void *data,
                          gboolean *recurse);

typedef struct DiffOptions {
    char store_id[37];
    int version;
    DiffFileCB file_cb;
    DiffDirCB dir_cb;
    void *data;
} DiffOptions;

int
diff_trees (int n, const char *roots[], DiffOptions *opt);

#endif

================================================
FILE: common/fs-mgr.c
================================================

/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

/* NOTE(review): `_GNU_SOURECE` is a long-standing typo — the guard always
 * fires, which is presumably why it has survived; confirm before fixing. */
#ifndef _GNU_SOURECE
#define _GNU_SOURCE
char *strcasestr (const char *haystack, const char *needle);
#undef _GNU_SOURCE
#endif

#include "common.h"

/* NOTE(review): the bare #include lines below lost their angle-bracket
 * header names during text extraction. */
#include
#include
#include

#ifndef WIN32
#include
#endif

#include
#include

#include "seafile-session.h"
#include "seafile-error.h"
#include "fs-mgr.h"
#include "block-mgr.h"
#include "utils.h"
#include "seaf-utils.h"
#define DEBUG_FLAG SEAFILE_DEBUG_OTHER
#include "log.h"

#include "../common/seafile-crypt.h"
#ifndef SEAFILE_SERVER
#include "../daemon/vc-utils.h"
#include "vc-common.h"
#endif /* SEAFILE_SERVER */

#include "db.h"

/* Suffix for the temporary file written during checkout. */
#define SEAF_TMP_EXT "~"

struct _SeafFSManagerPriv {
    /* GHashTable *seafile_cache; */
    GHashTable *bl_cache;
};

/* v0 binary on-disk layouts; packed, network byte order for ints. */
typedef struct SeafileOndisk {
    guint32 type;
    guint64 file_size;
    unsigned char block_ids[0];
} __attribute__((__packed__)) SeafileOndisk;

typedef struct DirentOndisk {
    guint32 mode;
    char id[40];
    guint32 name_len;
    char name[0];
} __attribute__((__packed__)) DirentOndisk;

typedef struct SeafdirOndisk {
    guint32 type;
    char dirents[0];
} __attribute__((__packed__)) SeafdirOndisk;

#ifndef SEAFILE_SERVER
uint32_t
calculate_chunk_size (uint64_t total_size);

static int
write_seafile (SeafFSManager *fs_mgr,
               const char *repo_id, int version,
               CDCFileDescriptor *cdc,
               unsigned char *obj_sha1);
#endif /* SEAFILE_SERVER */

/* Create the fs manager backed by the "fs" object store; NULL on failure. */
SeafFSManager *
seaf_fs_manager_new (SeafileSession *seaf,
                     const char *seaf_dir)
{
    SeafFSManager *mgr = g_new0 (SeafFSManager, 1);

    mgr->seaf = seaf;

    mgr->obj_store = seaf_obj_store_new (seaf, "fs");
    if (!mgr->obj_store) {
        g_free (mgr);
        return NULL;
    }

    mgr->priv = g_new0(SeafFSManagerPriv, 1);

    return mgr;
}

int
seaf_fs_manager_init (SeafFSManager *mgr)
{
    if (seaf_obj_store_init (mgr->obj_store) < 0) {
        seaf_warning ("[fs mgr] Failed to init fs object store.\n");
        return -1;
    }

    return 0;
}

#ifndef SEAFILE_SERVER
/* Read one block, optionally decrypt it, and write the plaintext to @wfd.
 * Returns 0 on success, -1 on any error. */
static int
checkout_block (const char *repo_id,
                int version,
                const char *block_id,
                int wfd,
                SeafileCrypt *crypt)
{
    SeafBlockManager *block_mgr = seaf->block_mgr;
    BlockHandle *handle;
    BlockMetadata *bmd;
    char *dec_out = NULL;
    int dec_out_len = -1;
    char *blk_content = NULL;

    handle = seaf_block_manager_open_block (block_mgr,
                                            repo_id, version,
                                            block_id, BLOCK_READ);
    if (!handle) {
        seaf_warning ("Failed to open block %s\n", block_id);
        return -1;
    }

    /* first stat the block to get its size */
    bmd = seaf_block_manager_stat_block_by_handle (block_mgr, handle);
    if (!bmd) {
        seaf_warning ("can't stat block %s.\n", block_id);
        goto checkout_blk_error;
    }

    /* empty file, skip it */
    if (bmd->size == 0) {
        seaf_block_manager_close_block (block_mgr, handle);
        seaf_block_manager_block_handle_free (block_mgr, handle);
        return 0;
    }

    blk_content = (char *)malloc (bmd->size *
                                  sizeof(char));
    /* read the block to prepare decryption */
    if (seaf_block_manager_read_block (block_mgr, handle,
                                       blk_content, bmd->size) != bmd->size) {
        seaf_warning ("Error when reading from block %s.\n", block_id);
        goto checkout_blk_error;
    }

    if (crypt != NULL) {
        /* An encrypted block size must be a multiple of ENCRYPT_BLK_SIZE */
        if (bmd->size % ENCRYPT_BLK_SIZE != 0) {
            seaf_warning ("Error: An invalid encrypted block, %s \n", block_id);
            goto checkout_blk_error;
        }

        /* decrypt the block */
        int ret = seafile_decrypt (&dec_out,
                                   &dec_out_len,
                                   blk_content,
                                   bmd->size,
                                   crypt);
        if (ret != 0) {
            seaf_warning ("Decryt block %s failed. \n", block_id);
            goto checkout_blk_error;
        }

        /* write the decrypted content */
        ret = writen (wfd, dec_out, dec_out_len);
        if (ret != dec_out_len) {
            seaf_warning ("Failed to write the decryted block %s.\n",
                          block_id);
            goto checkout_blk_error;
        }

        g_free (blk_content);
        g_free (dec_out);
    } else {
        /* not an encrypted block */
        if (writen(wfd, blk_content, bmd->size) != bmd->size) {
            seaf_warning ("Failed to write the decryted block %s.\n",
                          block_id);
            goto checkout_blk_error;
        }
        g_free (blk_content);
    }

    g_free (bmd);
    seaf_block_manager_close_block (block_mgr, handle);
    seaf_block_manager_block_handle_free (block_mgr, handle);
    return 0;

checkout_blk_error:
    /* bmd/dec_out may still be NULL on the early error paths. */
    if (blk_content)
        free (blk_content);
    if (dec_out)
        g_free (dec_out);
    if (bmd)
        g_free (bmd);

    seaf_block_manager_close_block (block_mgr, handle);
    seaf_block_manager_block_handle_free (block_mgr, handle);
    return -1;
}

/* Check a file out of the object store into @file_path, writing to a
 * temp file first and handling conflicts with the local copy. */
int
seaf_fs_manager_checkout_file (SeafFSManager *mgr,
                               const char *repo_id,
                               int version,
                               const char *file_id,
                               const char *file_path,
                               guint32 mode,
                               guint64 mtime,
                               SeafileCrypt *crypt,
                               const char *in_repo_path,
                               const char *conflict_head_id,
                               gboolean force_conflict,
                               gboolean *conflicted,
                               const char *email)
{
    Seafile *seafile;
    char *blk_id;
    int wfd;
    int i;
    char *tmp_path;
    char *conflict_path;

    *conflicted = FALSE;

    seafile = seaf_fs_manager_get_seafile (mgr, repo_id, version, file_id);
    if (!seafile) {
seaf_warning ("File %s does not exist.\n", file_id); return -1; } tmp_path = g_strconcat (file_path, SEAF_TMP_EXT, NULL); mode_t rmode = mode & 0100 ? 0777 : 0666; wfd = seaf_util_create (tmp_path, O_WRONLY | O_TRUNC | O_CREAT | O_BINARY, rmode & ~S_IFMT); if (wfd < 0) { seaf_warning ("Failed to open file %s for checkout: %s.\n", tmp_path, strerror(errno)); goto bad; } for (i = 0; i < seafile->n_blocks; ++i) { blk_id = seafile->blk_sha1s[i]; if (checkout_block (repo_id, version, blk_id, wfd, crypt) < 0) goto bad; } close (wfd); wfd = -1; if (force_conflict || seaf_util_rename (tmp_path, file_path) < 0) { *conflicted = TRUE; /* XXX * In new syncing protocol and http sync, files are checked out before * the repo is created. So we can't get user email from repo at this point. * So a email parameter is needed. * For old syncing protocol, repo always exists when files are checked out. * This is a quick and dirty hack. A cleaner solution should modifiy the * code of old syncing protocol to pass in email too. But I don't want to * spend more time on the nearly obsoleted code. */ const char *suffix = NULL; if (email) { suffix = email; } else { SeafRepo *repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) goto bad; suffix = email; } conflict_path = gen_conflict_path (file_path, suffix, (gint64)time(NULL)); seaf_warning ("Cannot update %s, creating conflict file %s.\n", file_path, conflict_path); /* First try to rename the local version to a conflict file, * this will preserve the version from the server. * If this fails, fall back to checking out the server version * to the conflict file. 
         */
        if (seaf_util_rename (file_path, conflict_path) == 0) {
            if (seaf_util_rename (tmp_path, file_path) < 0) {
                g_free (conflict_path);
                goto bad;
            }
        } else {
            g_free (conflict_path);
            conflict_path = gen_conflict_path_wrapper (repo_id, version,
                                                       conflict_head_id, in_repo_path,
                                                       file_path);
            if (!conflict_path)
                goto bad;
            if (seaf_util_rename (tmp_path, conflict_path) < 0) {
                g_free (conflict_path);
                goto bad;
            }
        }

        g_free (conflict_path);
    }

    if (mtime > 0) {
        /*
         * Set the checked out file mtime to what it has to be.
         */
        if (seaf_set_file_time (file_path, mtime) < 0) {
            seaf_warning ("Failed to set mtime for %s.\n", file_path);
        }
    }

    g_free (tmp_path);
    seafile_unref (seafile);
    return 0;

bad:
    if (wfd >= 0)
        close (wfd);
    /* Remove the tmp file if it still exists, in case that rename fails. */
    seaf_util_unlink (tmp_path);
    g_free (tmp_path);
    seafile_unref (seafile);

    return -1;
}
#endif /* SEAFILE_SERVER */

/* Serialize a v0 seafile object into its packed binary form; the hex
 * object id (from cdc->file_sum) is written to @seafile_id. */
static void *
create_seafile_v0 (CDCFileDescriptor *cdc, int *ondisk_size, char *seafile_id)
{
    SeafileOndisk *ondisk;

    rawdata_to_hex (cdc->file_sum, seafile_id, 20);

    *ondisk_size = sizeof(SeafileOndisk) + cdc->block_nr * 20;
    ondisk = (SeafileOndisk *)g_new0 (char, *ondisk_size);

    ondisk->type = htonl(SEAF_METADATA_TYPE_FILE);
    ondisk->file_size = hton64 (cdc->file_size);
    memcpy (ondisk->block_ids, cdc->blk_sha1s, cdc->block_nr * 20);

    return ondisk;
}

/* Serialize a v1 seafile object as canonical (key-sorted) JSON. The
 * object id — sha1 of the JSON text — is written to @seafile_id as hex.
 * Returned buffer is malloc'd by json_dumps; caller frees with free(). */
static void *
create_seafile_json (int repo_version,
                     CDCFileDescriptor *cdc,
                     int *ondisk_size,
                     char *seafile_id)
{
    json_t *object, *block_id_array;

    object = json_object ();

    json_object_set_int_member (object, "type", SEAF_METADATA_TYPE_FILE);
    json_object_set_int_member (object, "version",
                                seafile_version_from_repo_version(repo_version));
    json_object_set_int_member (object, "size", cdc->file_size);

    block_id_array = json_array ();
    int i;
    uint8_t *ptr = cdc->blk_sha1s;
    char block_id[41];
    for (i = 0; i < cdc->block_nr; ++i) {
        rawdata_to_hex (ptr, block_id, 20);
        json_array_append_new (block_id_array, json_string(block_id));
        ptr += 20;
    }
    json_object_set_new (object, "block_ids", block_id_array);

    /* JSON_SORT_KEYS keeps the serialization canonical so the sha1 is
     * stable across runs. */
    char *data = json_dumps (object, JSON_SORT_KEYS);
    *ondisk_size = strlen(data);

    /* The seafile object id is sha1 hash of the json object. */
    unsigned char sha1[20];
    calculate_sha1 (sha1, data, *ondisk_size);
    rawdata_to_hex (sha1, seafile_id, 20);

    json_decref (object);

    return data;
}

/* Compute (without storing) the v1 seafile object id for @cdc, writing
 * the raw 20-byte sha1 to @file_id_sha1. Mirrors create_seafile_json. */
void
seaf_fs_manager_calculate_seafile_id_json (int repo_version,
                                           CDCFileDescriptor *cdc,
                                           guint8 *file_id_sha1)
{
    json_t *object, *block_id_array;

    object = json_object ();

    json_object_set_int_member (object, "type", SEAF_METADATA_TYPE_FILE);
    json_object_set_int_member (object, "version",
                                seafile_version_from_repo_version(repo_version));
    json_object_set_int_member (object, "size", cdc->file_size);

    block_id_array = json_array ();
    int i;
    uint8_t *ptr = cdc->blk_sha1s;
    char block_id[41];
    for (i = 0; i < cdc->block_nr; ++i) {
        rawdata_to_hex (ptr, block_id, 20);
        json_array_append_new (block_id_array, json_string(block_id));
        ptr += 20;
    }
    json_object_set_new (object, "block_ids", block_id_array);

    char *data = json_dumps (object, JSON_SORT_KEYS);
    int ondisk_size = strlen(data);

    /* The seafile object id is sha1 hash of the json object.
     */
    calculate_sha1 (file_id_sha1, data, ondisk_size);

    json_decref (object);
    free (data);
}

/* Write the seafile object for @cdc into the object store: compressed JSON
 * for repo version > 0, raw v0 binary otherwise. On success the raw object
 * id is written to @obj_sha1. Returns 0 on success (including the
 * already-exists case), -1 on error. */
static int
write_seafile (SeafFSManager *fs_mgr,
               const char *repo_id, int version,
               CDCFileDescriptor *cdc,
               unsigned char *obj_sha1)
{
    int ret = 0;
    char seafile_id[41];
    void *ondisk;
    int ondisk_size;

    if (version > 0) {
        ondisk = create_seafile_json (version, cdc, &ondisk_size, seafile_id);

        guint8 *compressed;
        int outlen;
        if (seaf_obj_store_obj_exists (fs_mgr->obj_store, repo_id, version,
                                       seafile_id)) {
            ret = 0;
            /* json_dumps output is malloc'd, hence free() not g_free(). */
            free (ondisk);
            goto out;
        }
        if (seaf_compress (ondisk, ondisk_size, &compressed, &outlen) < 0) {
            seaf_warning ("Failed to compress seafile obj %s:%s.\n",
                          repo_id, seafile_id);
            ret = -1;
            free (ondisk);
            goto out;
        }
        if (seaf_obj_store_write_obj (fs_mgr->obj_store, repo_id, version,
                                      seafile_id, compressed, outlen, FALSE) < 0)
            ret = -1;
        g_free (compressed);
        free (ondisk);
    } else {
        ondisk = create_seafile_v0 (cdc, &ondisk_size, seafile_id);

        if (seaf_obj_store_obj_exists (fs_mgr->obj_store, repo_id, version,
                                       seafile_id)) {
            ret = 0;
            g_free (ondisk);
            goto out;
        }
        if (seaf_obj_store_write_obj (fs_mgr->obj_store, repo_id, version,
                                      seafile_id, ondisk, ondisk_size, FALSE) < 0)
            ret = -1;
        g_free (ondisk);
    }

out:
    if (ret == 0)
        hex_to_rawdata (seafile_id, obj_sha1, 20);

    return ret;
}

/* Pick a CDC chunk size that scales with the total file size (1-8 MiB). */
uint32_t
calculate_chunk_size (uint64_t total_size)
{
    const uint64_t GiB = 1073741824;
    const uint64_t MiB = 1048576;

    if (total_size >= (8 * GiB)) return 8 * MiB;
    if (total_size >= (4 * GiB)) return 4 * MiB;
    if (total_size >= (2 * GiB)) return 2 * MiB;
    return 1 * MiB;
}

/* (definition continues past this chunk) Write one chunk to the block
 * store under the hex form of @checksum. */
static int
do_write_chunk (const char *repo_id, int version,
                uint8_t *checksum, const char *buf, int len)
{
    SeafBlockManager *blk_mgr = seaf->block_mgr;
    char chksum_str[41];
    BlockHandle *handle;
    int n;

    rawdata_to_hex (checksum, chksum_str, 20);

    /* Don't write if the block already exists.
*/ if (seaf_block_manager_block_exists (seaf->block_mgr, repo_id, version, chksum_str)) return 0; handle = seaf_block_manager_open_block (blk_mgr, repo_id, version, chksum_str, BLOCK_WRITE); if (!handle) { seaf_warning ("Failed to open block %s.\n", chksum_str); return -1; } n = seaf_block_manager_write_block (blk_mgr, handle, buf, len); if (n < 0) { seaf_warning ("Failed to write chunk %s.\n", chksum_str); seaf_block_manager_close_block (blk_mgr, handle); seaf_block_manager_block_handle_free (blk_mgr, handle); return -1; } if (seaf_block_manager_close_block (blk_mgr, handle) < 0) { seaf_warning ("failed to close block %s.\n", chksum_str); seaf_block_manager_block_handle_free (blk_mgr, handle); return -1; } if (seaf_block_manager_commit_block (blk_mgr, handle) < 0) { seaf_warning ("failed to commit chunk %s.\n", chksum_str); seaf_block_manager_block_handle_free (blk_mgr, handle); return -1; } seaf_block_manager_block_handle_free (blk_mgr, handle); return 0; } /* write the chunk and store its checksum */ int seafile_write_chunk (const char *repo_id, int version, CDCDescriptor *chunk, SeafileCrypt *crypt, uint8_t *checksum, gboolean write_data) { SHA_CTX ctx; int ret = 0; /* Encrypt before write to disk if needed, and we don't encrypt * empty files. 
*/ if (crypt != NULL && chunk->len) { char *encrypted_buf = NULL; /* encrypted output */ int enc_len = -1; /* encrypted length */ ret = seafile_encrypt (&encrypted_buf, /* output */ &enc_len, /* output len */ chunk->block_buf, /* input */ chunk->len, /* input len */ crypt); if (ret != 0) { seaf_warning ("Error: failed to encrypt block\n"); return -1; } SHA1_Init (&ctx); SHA1_Update (&ctx, encrypted_buf, enc_len); SHA1_Final (checksum, &ctx); if (write_data) ret = do_write_chunk (repo_id, version, checksum, encrypted_buf, enc_len); g_free (encrypted_buf); } else { /* not a encrypted repo, go ahead */ SHA1_Init (&ctx); SHA1_Update (&ctx, chunk->block_buf, chunk->len); SHA1_Final (checksum, &ctx); if (write_data) ret = do_write_chunk (repo_id, version, checksum, chunk->block_buf, chunk->len); } return ret; } static void create_cdc_for_empty_file (CDCFileDescriptor *cdc) { memset (cdc, 0, sizeof(CDCFileDescriptor)); } #if defined SEAFILE_SERVER && defined FULL_FEATURE #define FIXED_BLOCK_SIZE (1<<20) typedef struct ChunkingData { const char *repo_id; int version; const char *file_path; SeafileCrypt *crypt; guint8 *blk_sha1s; GAsyncQueue *finished_tasks; } ChunkingData; static void chunking_worker (gpointer vdata, gpointer user_data) { ChunkingData *data = user_data; CDCDescriptor *chunk = vdata; int fd = -1; ssize_t n; int idx; chunk->block_buf = g_new0 (char, chunk->len); if (!chunk->block_buf) { seaf_warning ("Failed to allow chunk buffer\n"); goto out; } fd = seaf_util_open (data->file_path, O_RDONLY | O_BINARY); if (fd < 0) { seaf_warning ("Failed to open %s: %s\n", data->file_path, strerror(errno)); chunk->result = -1; goto out; } if (seaf_util_lseek (fd, chunk->offset, SEEK_SET) == (gint64)-1) { seaf_warning ("Failed to lseek %s: %s\n", data->file_path, strerror(errno)); chunk->result = -1; goto out; } n = readn (fd, chunk->block_buf, chunk->len); if (n < 0) { seaf_warning ("Failed to read chunk from %s: %s\n", data->file_path, strerror(errno)); chunk->result = 
-1; goto out; } chunk->result = seafile_write_chunk (data->repo_id, data->version, chunk, data->crypt, chunk->checksum, 1); if (chunk->result < 0) goto out; idx = chunk->offset / seaf->fixed_block_size; memcpy (data->blk_sha1s + idx * CHECKSUM_LENGTH, chunk->checksum, CHECKSUM_LENGTH); out: g_free (chunk->block_buf); close (fd); g_async_queue_push (data->finished_tasks, chunk); } static int split_file_to_block (const char *repo_id, int version, const char *file_path, gint64 file_size, SeafileCrypt *crypt, CDCFileDescriptor *cdc, gboolean write_data, gint64 *indexed) { int n_blocks; uint8_t *block_sha1s = NULL; GThreadPool *tpool = NULL; GAsyncQueue *finished_tasks = NULL; GList *pending_tasks = NULL; int n_pending = 0; CDCDescriptor *chunk; int ret = 0; n_blocks = (file_size + seaf->fixed_block_size - 1) / seaf->fixed_block_size; block_sha1s = g_new0 (uint8_t, n_blocks * CHECKSUM_LENGTH); if (!block_sha1s) { seaf_warning ("Failed to allocate block_sha1s.\n"); ret = -1; goto out; } finished_tasks = g_async_queue_new (); ChunkingData data; memset (&data, 0, sizeof(data)); data.repo_id = repo_id; data.version = version; data.file_path = file_path; data.crypt = crypt; data.blk_sha1s = block_sha1s; data.finished_tasks = finished_tasks; tpool = g_thread_pool_new (chunking_worker, &data, seaf->max_indexing_threads, FALSE, NULL); if (!tpool) { seaf_warning ("Failed to allocate thread pool\n"); ret = -1; goto out; } guint64 offset = 0; guint64 len; guint64 left = (guint64)file_size; while (left > 0) { len = ((left >= seaf->fixed_block_size) ? 
seaf->fixed_block_size : left); chunk = g_new0 (CDCDescriptor, 1); chunk->offset = offset; chunk->len = (guint32)len; g_thread_pool_push (tpool, chunk, NULL); pending_tasks = g_list_prepend (pending_tasks, chunk); n_pending++; left -= len; offset += len; } while ((chunk = g_async_queue_pop (finished_tasks)) != NULL) { if (chunk->result < 0) { ret = -1; goto out; } if (indexed) *indexed += seaf->fixed_block_size; if ((--n_pending) <= 0) { if (indexed) *indexed = (guint64)file_size; break; } } cdc->block_nr = n_blocks; cdc->blk_sha1s = block_sha1s; out: if (tpool) g_thread_pool_free (tpool, TRUE, TRUE); if (finished_tasks) g_async_queue_unref (finished_tasks); g_list_free_full (pending_tasks, g_free); if (ret < 0) g_free (block_sha1s); return ret; } #endif /* SEAFILE_SERVER */ #define CDC_AVERAGE_BLOCK_SIZE (1 << 23) /* 8MB */ #define CDC_MIN_BLOCK_SIZE (6 * (1 << 20)) /* 6MB */ #define CDC_MAX_BLOCK_SIZE (10 * (1 << 20)) /* 10MB */ int seaf_fs_manager_index_blocks (SeafFSManager *mgr, const char *repo_id, int version, const char *file_path, unsigned char sha1[], gint64 *size, SeafileCrypt *crypt, gboolean write_data, gboolean use_cdc, gint64 *indexed) { SeafStat sb; CDCFileDescriptor cdc; if (seaf_stat (file_path, &sb) < 0) { seaf_warning ("Bad file %s: %s.\n", file_path, strerror(errno)); return -1; } g_return_val_if_fail (S_ISREG(sb.st_mode), -1); if (sb.st_size == 0) { /* handle empty file. 
*/ memset (sha1, 0, 20); create_cdc_for_empty_file (&cdc); } else { memset (&cdc, 0, sizeof(cdc)); #if defined SEAFILE_SERVER && defined FULL_FEATURE if (use_cdc || version == 0) { cdc.block_sz = CDC_AVERAGE_BLOCK_SIZE; cdc.block_min_sz = CDC_MIN_BLOCK_SIZE; cdc.block_max_sz = CDC_MAX_BLOCK_SIZE; cdc.write_block = seafile_write_chunk; memcpy (cdc.repo_id, repo_id, 36); cdc.version = version; if (filename_chunk_cdc (file_path, &cdc, crypt, write_data, indexed) < 0) { seaf_warning ("Failed to chunk file with CDC.\n"); return -1; } } else { memcpy (cdc.repo_id, repo_id, 36); cdc.version = version; cdc.file_size = sb.st_size; if (split_file_to_block (repo_id, version, file_path, sb.st_size, crypt, &cdc, write_data, indexed) < 0) { return -1; } } #else cdc.block_sz = CDC_AVERAGE_BLOCK_SIZE; cdc.block_min_sz = CDC_MIN_BLOCK_SIZE; cdc.block_max_sz = CDC_MAX_BLOCK_SIZE; cdc.write_block = seafile_write_chunk; memcpy (cdc.repo_id, repo_id, 36); cdc.version = version; if (filename_chunk_cdc (file_path, &cdc, crypt, write_data, indexed) < 0) { seaf_warning ("Failed to chunk file with CDC.\n"); return -1; } #endif if (write_data && write_seafile (mgr, repo_id, version, &cdc, sha1) < 0) { g_free (cdc.blk_sha1s); seaf_warning ("Failed to write seafile for %s.\n", file_path); return -1; } } *size = (gint64)sb.st_size; if (cdc.blk_sha1s) free (cdc.blk_sha1s); return 0; } static int check_and_write_block (const char *repo_id, int version, const char *path, unsigned char *sha1, const char *block_id) { char *content; gsize len; GError *error = NULL; int ret = 0; if (!g_file_get_contents (path, &content, &len, &error)) { if (error) { seaf_warning ("Failed to read %s: %s.\n", path, error->message); g_clear_error (&error); return -1; } } SHA_CTX block_ctx; unsigned char checksum[20]; SHA1_Init (&block_ctx); SHA1_Update (&block_ctx, content, len); SHA1_Final (checksum, &block_ctx); if (memcmp (checksum, sha1, 20) != 0) { seaf_warning ("Block id %s:%s doesn't match content.\n", repo_id, 
block_id); ret = -1; goto out; } if (do_write_chunk (repo_id, version, sha1, content, len) < 0) { ret = -1; goto out; } out: g_free (content); return ret; } static int check_and_write_file_blocks (CDCFileDescriptor *cdc, GList *paths, GList *blockids) { GList *ptr, *q; SHA_CTX file_ctx; int ret = 0; SHA1_Init (&file_ctx); for (ptr = paths, q = blockids; ptr; ptr = ptr->next, q = q->next) { char *path = ptr->data; char *blk_id = q->data; unsigned char sha1[20]; hex_to_rawdata (blk_id, sha1, 20); ret = check_and_write_block (cdc->repo_id, cdc->version, path, sha1, blk_id); if (ret < 0) goto out; memcpy (cdc->blk_sha1s + cdc->block_nr * CHECKSUM_LENGTH, sha1, CHECKSUM_LENGTH); cdc->block_nr++; SHA1_Update (&file_ctx, sha1, 20); } SHA1_Final (cdc->file_sum, &file_ctx); out: return ret; } static int check_existed_file_blocks (CDCFileDescriptor *cdc, GList *blockids) { GList *q; SHA_CTX file_ctx; int ret = 0; SHA1_Init (&file_ctx); for (q = blockids; q; q = q->next) { char *blk_id = q->data; unsigned char sha1[20]; if (!seaf_block_manager_block_exists ( seaf->block_mgr, cdc->repo_id, cdc->version, blk_id)) { ret = -1; goto out; } hex_to_rawdata (blk_id, sha1, 20); memcpy (cdc->blk_sha1s + cdc->block_nr * CHECKSUM_LENGTH, sha1, CHECKSUM_LENGTH); cdc->block_nr++; SHA1_Update (&file_ctx, sha1, 20); } SHA1_Final (cdc->file_sum, &file_ctx); out: return ret; } static int init_file_cdc (CDCFileDescriptor *cdc, const char *repo_id, int version, int block_nr, gint64 file_size) { memset (cdc, 0, sizeof(CDCFileDescriptor)); cdc->file_size = file_size; cdc->blk_sha1s = (uint8_t *)calloc (sizeof(uint8_t), block_nr * CHECKSUM_LENGTH); if (!cdc->blk_sha1s) { seaf_warning ("Failed to alloc block sha1 array.\n"); return -1; } memcpy (cdc->repo_id, repo_id, 36); cdc->version = version; return 0; } int seaf_fs_manager_index_file_blocks (SeafFSManager *mgr, const char *repo_id, int version, GList *paths, GList *blockids, unsigned char sha1[], gint64 file_size) { int ret = 0; 
CDCFileDescriptor cdc; if (!paths) { /* handle empty file. */ memset (sha1, 0, 20); create_cdc_for_empty_file (&cdc); } else { int block_nr = g_list_length (paths); if (init_file_cdc (&cdc, repo_id, version, block_nr, file_size) < 0) { ret = -1; goto out; } if (check_and_write_file_blocks (&cdc, paths, blockids) < 0) { seaf_warning ("Failed to check and write file blocks.\n"); ret = -1; goto out; } if (write_seafile (mgr, repo_id, version, &cdc, sha1) < 0) { seaf_warning ("Failed to write seafile.\n"); ret = -1; goto out; } } out: if (cdc.blk_sha1s) free (cdc.blk_sha1s); return ret; } int seaf_fs_manager_index_raw_blocks (SeafFSManager *mgr, const char *repo_id, int version, GList *paths, GList *blockids) { int ret = 0; GList *ptr, *q; if (!paths) return -1; for (ptr = paths, q = blockids; ptr; ptr = ptr->next, q = q->next) { char *path = ptr->data; char *blk_id = q->data; unsigned char sha1[20]; hex_to_rawdata (blk_id, sha1, 20); ret = check_and_write_block (repo_id, version, path, sha1, blk_id); if (ret < 0) break; } return ret; } int seaf_fs_manager_index_existed_file_blocks (SeafFSManager *mgr, const char *repo_id, int version, GList *blockids, unsigned char sha1[], gint64 file_size) { int ret = 0; CDCFileDescriptor cdc; int block_nr = g_list_length (blockids); if (block_nr == 0) { /* handle empty file. 
*/ memset (sha1, 0, 20); create_cdc_for_empty_file (&cdc); } else { if (init_file_cdc (&cdc, repo_id, version, block_nr, file_size) < 0) { ret = -1; goto out; } if (check_existed_file_blocks (&cdc, blockids) < 0) { seaf_warning ("Failed to check and write file blocks.\n"); ret = -1; goto out; } if (write_seafile (mgr, repo_id, version, &cdc, sha1) < 0) { seaf_warning ("Failed to write seafile.\n"); ret = -1; goto out; } } out: if (cdc.blk_sha1s) free (cdc.blk_sha1s); return ret; } void seafile_ref (Seafile *seafile) { ++seafile->ref_count; } static void seafile_free (Seafile *seafile) { int i; if (seafile->blk_sha1s) { for (i = 0; i < seafile->n_blocks; ++i) g_free (seafile->blk_sha1s[i]); g_free (seafile->blk_sha1s); } g_free (seafile); } void seafile_unref (Seafile *seafile) { if (!seafile) return; if (--seafile->ref_count <= 0) seafile_free (seafile); } static Seafile * seafile_from_v0_data (const char *id, const void *data, int len) { const SeafileOndisk *ondisk = data; Seafile *seafile; int id_list_len, n_blocks; if (len < sizeof(SeafileOndisk)) { seaf_warning ("[fs mgr] Corrupt seafile object %s.\n", id); return NULL; } if (ntohl(ondisk->type) != SEAF_METADATA_TYPE_FILE) { seaf_warning ("[fd mgr] %s is not a file.\n", id); return NULL; } id_list_len = len - sizeof(SeafileOndisk); if (id_list_len % 20 != 0) { seaf_warning ("[fs mgr] Corrupt seafile object %s.\n", id); return NULL; } n_blocks = id_list_len / 20; seafile = g_new0 (Seafile, 1); seafile->object.type = SEAF_METADATA_TYPE_FILE; seafile->version = 0; memcpy (seafile->file_id, id, 41); seafile->file_size = ntoh64 (ondisk->file_size); seafile->n_blocks = n_blocks; seafile->blk_sha1s = g_new0 (char*, seafile->n_blocks); const unsigned char *blk_sha1_ptr = ondisk->block_ids; int i; for (i = 0; i < seafile->n_blocks; ++i) { char *blk_sha1 = g_new0 (char, 41); seafile->blk_sha1s[i] = blk_sha1; rawdata_to_hex (blk_sha1_ptr, blk_sha1, 20); blk_sha1_ptr += 20; } seafile->ref_count = 1; return seafile; } 
static Seafile * seafile_from_json_object (const char *id, json_t *object) { json_t *block_id_array = NULL; int type; int version; guint64 file_size; Seafile *seafile = NULL; /* Sanity checks. */ type = json_object_get_int_member (object, "type"); if (type != SEAF_METADATA_TYPE_FILE) { seaf_debug ("Object %s is not a file.\n", id); return NULL; } version = (int) json_object_get_int_member (object, "version"); if (version < 1) { seaf_debug ("Seafile object %s version should be > 0, version is %d.\n", id, version); return NULL; } file_size = (guint64) json_object_get_int_member (object, "size"); block_id_array = json_object_get (object, "block_ids"); if (!block_id_array) { seaf_debug ("No block id array in seafile object %s.\n", id); return NULL; } seafile = g_new0 (Seafile, 1); seafile->object.type = SEAF_METADATA_TYPE_FILE; memcpy (seafile->file_id, id, 40); seafile->version = version; seafile->file_size = file_size; seafile->n_blocks = json_array_size (block_id_array); seafile->blk_sha1s = g_new0 (char *, seafile->n_blocks); int i; json_t *block_id_obj; const char *block_id; for (i = 0; i < seafile->n_blocks; ++i) { block_id_obj = json_array_get (block_id_array, i); block_id = json_string_value (block_id_obj); if (!block_id || !is_object_id_valid(block_id)) { seafile_free (seafile); return NULL; } seafile->blk_sha1s[i] = g_strdup(block_id); } seafile->ref_count = 1; return seafile; } static Seafile * seafile_from_json (const char *id, void *data, int len) { guint8 *decompressed; int outlen; json_t *object = NULL; json_error_t error; Seafile *seafile; if (seaf_decompress (data, len, &decompressed, &outlen) < 0) { seaf_warning ("Failed to decompress seafile object %s.\n", id); return NULL; } object = json_loadb ((const char *)decompressed, outlen, 0, &error); g_free (decompressed); if (!object) { if (error.text) seaf_warning ("Failed to load seafile json object: %s.\n", error.text); else seaf_warning ("Failed to load seafile json object.\n"); return NULL; } seafile 
= seafile_from_json_object (id, object); json_decref (object); return seafile; } static Seafile * seafile_from_data (const char *id, void *data, int len, gboolean is_json) { if (is_json) return seafile_from_json (id, data, len); else return seafile_from_v0_data (id, data, len); } Seafile * seaf_fs_manager_get_seafile (SeafFSManager *mgr, const char *repo_id, int version, const char *file_id) { void *data; int len; Seafile *seafile; #if 0 seafile = g_hash_table_lookup (mgr->priv->seafile_cache, file_id); if (seafile) { seafile_ref (seafile); return seafile; } #endif if (memcmp (file_id, EMPTY_SHA1, 40) == 0) { seafile = g_new0 (Seafile, 1); memset (seafile->file_id, '0', 40); seafile->ref_count = 1; return seafile; } if (seaf_obj_store_read_obj (mgr->obj_store, repo_id, version, file_id, &data, &len) < 0) { seaf_warning ("[fs mgr] Failed to read file %s.\n", file_id); return NULL; } seafile = seafile_from_data (file_id, data, len, (version > 0)); g_free (data); #if 0 /* * Add to cache. Also increase ref count. 
*/ seafile_ref (seafile); g_hash_table_insert (mgr->priv->seafile_cache, g_strdup(file_id), seafile); #endif return seafile; } static guint8 * seafile_to_v0_data (Seafile *file, int *len) { SeafileOndisk *ondisk; *len = sizeof(SeafileOndisk) + file->n_blocks * 20; ondisk = (SeafileOndisk *)g_new0 (char, *len); ondisk->type = htonl(SEAF_METADATA_TYPE_FILE); ondisk->file_size = hton64 (file->file_size); guint8 *ptr = ondisk->block_ids; int i; for (i = 0; i < file->n_blocks; ++i) { hex_to_rawdata (file->blk_sha1s[i], ptr, 20); ptr += 20; } return (guint8 *)ondisk; } static guint8 * seafile_to_json (Seafile *file, int *len) { json_t *object, *block_id_array; object = json_object (); json_object_set_int_member (object, "type", SEAF_METADATA_TYPE_FILE); json_object_set_int_member (object, "version", file->version); json_object_set_int_member (object, "size", file->file_size); block_id_array = json_array (); int i; for (i = 0; i < file->n_blocks; ++i) { json_array_append_new (block_id_array, json_string(file->blk_sha1s[i])); } json_object_set_new (object, "block_ids", block_id_array); char *data = json_dumps (object, JSON_SORT_KEYS); *len = strlen(data); unsigned char sha1[20]; calculate_sha1 (sha1, data, *len); rawdata_to_hex (sha1, file->file_id, 20); json_decref (object); return (guint8 *)data; } static guint8 * seafile_to_data (Seafile *file, int *len) { if (file->version > 0) { guint8 *data; int orig_len; guint8 *compressed; data = seafile_to_json (file, &orig_len); if (!data) return NULL; if (seaf_compress (data, orig_len, &compressed, len) < 0) { seaf_warning ("Failed to compress file object %s.\n", file->file_id); g_free (data); return NULL; } g_free (data); return compressed; } else return seafile_to_v0_data (file, len); } int seafile_save (SeafFSManager *fs_mgr, const char *repo_id, int version, Seafile *file) { guint8 *data; int len; int ret = 0; if (seaf_obj_store_obj_exists (fs_mgr->obj_store, repo_id, version, file->file_id)) return 0; data = seafile_to_data 
(file, &len); if (!data) return -1; if (seaf_obj_store_write_obj (fs_mgr->obj_store, repo_id, version, file->file_id, data, len, FALSE) < 0) ret = -1; g_free (data); return ret; } static void compute_dir_id_v0 (SeafDir *dir, GList *entries) { SHA_CTX ctx; GList *p; uint8_t sha1[20]; SeafDirent *dent; guint32 mode_le; /* ID for empty dirs is EMPTY_SHA1. */ if (entries == NULL) { memset (dir->dir_id, '0', 40); return; } SHA1_Init (&ctx); for (p = entries; p; p = p->next) { dent = (SeafDirent *)p->data; SHA1_Update (&ctx, dent->id, 40); SHA1_Update (&ctx, dent->name, dent->name_len); /* Convert mode to little endian before compute. */ if (G_BYTE_ORDER == G_BIG_ENDIAN) mode_le = GUINT32_SWAP_LE_BE (dent->mode); else mode_le = dent->mode; SHA1_Update (&ctx, &mode_le, sizeof(mode_le)); } SHA1_Final (sha1, &ctx); rawdata_to_hex (sha1, dir->dir_id, 20); } SeafDir * seaf_dir_new (const char *id, GList *entries, int version) { SeafDir *dir; dir = g_new0(SeafDir, 1); dir->version = version; if (id != NULL) { memcpy(dir->dir_id, id, 40); dir->dir_id[40] = '\0'; } else if (version == 0) { compute_dir_id_v0 (dir, entries); } dir->entries = entries; if (dir->entries != NULL) dir->ondisk = seaf_dir_to_data (dir, &dir->ondisk_size); else memcpy (dir->dir_id, EMPTY_SHA1, 40); return dir; } void seaf_dir_free (SeafDir *dir) { if (dir == NULL) return; GList *ptr = dir->entries; while (ptr) { seaf_dirent_free ((SeafDirent *)ptr->data); ptr = ptr->next; } g_list_free (dir->entries); g_free (dir->ondisk); g_free(dir); } SeafDirent * seaf_dirent_new (int version, const char *sha1, int mode, const char *name, gint64 mtime, const char *modifier, gint64 size) { SeafDirent *dent; dent = g_new0 (SeafDirent, 1); dent->version = version; memcpy(dent->id, sha1, 40); dent->id[40] = '\0'; /* Mode for files must have 0644 set. To prevent the caller from forgetting, * we set the bits here. 
*/ if (S_ISREG(mode)) dent->mode = (mode | 0644); else dent->mode = mode; dent->name = g_strdup(name); dent->name_len = strlen(name); if (version > 0) { dent->mtime = mtime; if (S_ISREG(mode)) { dent->modifier = g_strdup(modifier); dent->size = size; } } return dent; } void seaf_dirent_free (SeafDirent *dent) { if (!dent) return; g_free (dent->name); g_free (dent->modifier); g_free (dent); } SeafDirent * seaf_dirent_dup (SeafDirent *dent) { SeafDirent *new_dent; new_dent = g_memdup (dent, sizeof(SeafDirent)); new_dent->name = g_strdup(dent->name); new_dent->modifier = g_strdup(dent->modifier); return new_dent; } static SeafDir * seaf_dir_from_v0_data (const char *dir_id, const uint8_t *data, int len) { SeafDir *root; SeafDirent *dent; const uint8_t *ptr; int remain; int dirent_base_size; guint32 meta_type; guint32 name_len; ptr = data; remain = len; meta_type = get32bit (&ptr); remain -= 4; if (meta_type != SEAF_METADATA_TYPE_DIR) { seaf_warning ("Data does not contain a directory.\n"); return NULL; } root = g_new0(SeafDir, 1); root->object.type = SEAF_METADATA_TYPE_DIR; root->version = 0; memcpy(root->dir_id, dir_id, 40); root->dir_id[40] = '\0'; dirent_base_size = 2 * sizeof(guint32) + 40; while (remain > dirent_base_size) { dent = g_new0(SeafDirent, 1); dent->version = 0; dent->mode = get32bit (&ptr); memcpy (dent->id, ptr, 40); dent->id[40] = '\0'; ptr += 40; name_len = get32bit (&ptr); remain -= dirent_base_size; if (remain >= name_len) { dent->name_len = MIN (name_len, SEAF_DIR_NAME_LEN - 1); dent->name = g_strndup((const char *)ptr, dent->name_len); ptr += dent->name_len; remain -= dent->name_len; } else { seaf_warning ("Bad data format for dir objcet %s.\n", dir_id); g_free (dent); goto bad; } root->entries = g_list_prepend (root->entries, dent); } root->entries = g_list_reverse (root->entries); return root; bad: seaf_dir_free (root); return NULL; } static SeafDirent * parse_dirent (const char *dir_id, int version, json_t *object) { guint32 mode; const char 
*id; const char *name; gint64 mtime; const char *modifier; gint64 size; mode = (guint32) json_object_get_int_member (object, "mode"); id = json_object_get_string_member (object, "id"); if (!id) { seaf_debug ("Dirent id not set for dir object %s.\n", dir_id); return NULL; } if (!is_object_id_valid (id)) { seaf_debug ("Dirent id is invalid for dir object %s.\n", dir_id); return NULL; } name = json_object_get_string_member (object, "name"); if (!name) { seaf_debug ("Dirent name not set for dir object %s.\n", dir_id); return NULL; } mtime = json_object_get_int_member (object, "mtime"); if (S_ISREG(mode)) { modifier = json_object_get_string_member (object, "modifier"); if (!modifier) { seaf_debug ("Dirent modifier not set for dir object %s.\n", dir_id); return NULL; } size = json_object_get_int_member (object, "size"); } SeafDirent *dirent = g_new0 (SeafDirent, 1); dirent->version = version; dirent->mode = mode; memcpy (dirent->id, id, 40); dirent->name_len = strlen(name); dirent->name = g_strdup(name); dirent->mtime = mtime; if (S_ISREG(mode)) { dirent->modifier = g_strdup(modifier); dirent->size = size; } return dirent; } static SeafDir * seaf_dir_from_json_object (const char *dir_id, json_t *object) { json_t *dirent_array = NULL; int type; int version; SeafDir *dir = NULL; /* Sanity checks. 
*/ type = json_object_get_int_member (object, "type"); if (type != SEAF_METADATA_TYPE_DIR) { seaf_debug ("Object %s is not a dir.\n", dir_id); return NULL; } version = (int) json_object_get_int_member (object, "version"); if (version < 1) { seaf_debug ("Dir object %s version should be > 0, version is %d.\n", dir_id, version); return NULL; } dirent_array = json_object_get (object, "dirents"); if (!dirent_array) { seaf_debug ("No dirents in dir object %s.\n", dir_id); return NULL; } dir = g_new0 (SeafDir, 1); dir->object.type = SEAF_METADATA_TYPE_DIR; memcpy (dir->dir_id, dir_id, 40); dir->version = version; size_t n_dirents = json_array_size (dirent_array); int i; json_t *dirent_obj; SeafDirent *dirent; for (i = 0; i < n_dirents; ++i) { dirent_obj = json_array_get (dirent_array, i); dirent = parse_dirent (dir_id, version, dirent_obj); if (!dirent) { seaf_dir_free (dir); return NULL; } dir->entries = g_list_prepend (dir->entries, dirent); } dir->entries = g_list_reverse (dir->entries); return dir; } static SeafDir * seaf_dir_from_json (const char *dir_id, uint8_t *data, int len) { guint8 *decompressed; int outlen; json_t *object = NULL; json_error_t error; SeafDir *dir; if (seaf_decompress (data, len, &decompressed, &outlen) < 0) { seaf_warning ("Failed to decompress dir object %s.\n", dir_id); return NULL; } object = json_loadb ((const char *)decompressed, outlen, 0, &error); g_free (decompressed); if (!object) { if (error.text) seaf_warning ("Failed to load seafdir json object: %s.\n", error.text); else seaf_warning ("Failed to load seafdir json object.\n"); return NULL; } dir = seaf_dir_from_json_object (dir_id, object); json_decref (object); return dir; } SeafDir * seaf_dir_from_data (const char *dir_id, uint8_t *data, int len, gboolean is_json) { if (is_json) return seaf_dir_from_json (dir_id, data, len); else return seaf_dir_from_v0_data (dir_id, data, len); } inline static int ondisk_dirent_size (SeafDirent *dirent) { return sizeof(DirentOndisk) + 
dirent->name_len; } static void * seaf_dir_to_v0_data (SeafDir *dir, int *len) { SeafdirOndisk *ondisk; int dir_ondisk_size = sizeof(SeafdirOndisk); GList *dirents = dir->entries; GList *ptr; SeafDirent *de; char *p; DirentOndisk *de_ondisk; for (ptr = dirents; ptr; ptr = ptr->next) { de = ptr->data; dir_ondisk_size += ondisk_dirent_size (de); } *len = dir_ondisk_size; ondisk = (SeafdirOndisk *) g_new0 (char, dir_ondisk_size); ondisk->type = htonl (SEAF_METADATA_TYPE_DIR); p = ondisk->dirents; for (ptr = dirents; ptr; ptr = ptr->next) { de = ptr->data; de_ondisk = (DirentOndisk *) p; de_ondisk->mode = htonl(de->mode); memcpy (de_ondisk->id, de->id, 40); de_ondisk->name_len = htonl (de->name_len); memcpy (de_ondisk->name, de->name, de->name_len); p += ondisk_dirent_size (de); } return (void *)ondisk; } static void add_to_dirent_array (json_t *array, SeafDirent *dirent) { json_t *object; object = json_object (); json_object_set_int_member (object, "mode", dirent->mode); json_object_set_string_member (object, "id", dirent->id); json_object_set_string_member (object, "name", dirent->name); json_object_set_int_member (object, "mtime", dirent->mtime); if (S_ISREG(dirent->mode)) { json_object_set_string_member (object, "modifier", dirent->modifier); json_object_set_int_member (object, "size", dirent->size); } json_array_append_new (array, object); } static void * seaf_dir_to_json (SeafDir *dir, int *len) { json_t *object, *dirent_array; GList *ptr; SeafDirent *dirent; object = json_object (); json_object_set_int_member (object, "type", SEAF_METADATA_TYPE_DIR); json_object_set_int_member (object, "version", dir->version); dirent_array = json_array (); for (ptr = dir->entries; ptr; ptr = ptr->next) { dirent = ptr->data; add_to_dirent_array (dirent_array, dirent); } json_object_set_new (object, "dirents", dirent_array); char *data = json_dumps (object, JSON_SORT_KEYS); *len = strlen(data); /* The dir object id is sha1 hash of the json object. 
*/ unsigned char sha1[20]; calculate_sha1 (sha1, data, *len); rawdata_to_hex (sha1, dir->dir_id, 20); json_decref (object); return data; } void * seaf_dir_to_data (SeafDir *dir, int *len) { if (dir->version > 0) { guint8 *data; int orig_len; guint8 *compressed; data = seaf_dir_to_json (dir, &orig_len); if (!data) return NULL; if (seaf_compress (data, orig_len, &compressed, len) < 0) { seaf_warning ("Failed to compress dir object %s.\n", dir->dir_id); g_free (data); return NULL; } g_free (data); return compressed; } else return seaf_dir_to_v0_data (dir, len); } int seaf_dir_save (SeafFSManager *fs_mgr, const char *repo_id, int version, SeafDir *dir) { int ret = 0; /* Don't need to save empty dir on disk. */ if (memcmp (dir->dir_id, EMPTY_SHA1, 40) == 0) return 0; if (seaf_obj_store_obj_exists (fs_mgr->obj_store, repo_id, version, dir->dir_id)) return 0; if (seaf_obj_store_write_obj (fs_mgr->obj_store, repo_id, version, dir->dir_id, dir->ondisk, dir->ondisk_size, FALSE) < 0) ret = -1; return ret; } SeafDir * seaf_fs_manager_get_seafdir (SeafFSManager *mgr, const char *repo_id, int version, const char *dir_id) { void *data; int len; SeafDir *dir; /* TODO: add hash cache */ if (memcmp (dir_id, EMPTY_SHA1, 40) == 0) { dir = g_new0 (SeafDir, 1); dir->version = version; memset (dir->dir_id, '0', 40); return dir; } if (seaf_obj_store_read_obj (mgr->obj_store, repo_id, version, dir_id, &data, &len) < 0) { seaf_warning ("[fs mgr] Failed to read dir %s.\n", dir_id); return NULL; } dir = seaf_dir_from_data (dir_id, data, len, (version > 0)); g_free (data); return dir; } static gint compare_dirents (gconstpointer a, gconstpointer b) { const SeafDirent *denta = a, *dentb = b; return strcmp (dentb->name, denta->name); } static gboolean is_dirents_sorted (GList *dirents) { GList *ptr; SeafDirent *dent, *dent_n; gboolean ret = TRUE; for (ptr = dirents; ptr != NULL; ptr = ptr->next) { dent = ptr->data; if (!ptr->next) break; dent_n = ptr->next->data; /* If dirents are not sorted in 
descending order, return FALSE. */ if (strcmp (dent->name, dent_n->name) < 0) { ret = FALSE; break; } } return ret; } SeafDir * seaf_fs_manager_get_seafdir_sorted (SeafFSManager *mgr, const char *repo_id, int version, const char *dir_id) { SeafDir *dir = seaf_fs_manager_get_seafdir(mgr, repo_id, version, dir_id); if (!dir) return NULL; /* Only some very old dir objects are not sorted. */ if (version > 0) return dir; if (!is_dirents_sorted (dir->entries)) dir->entries = g_list_sort (dir->entries, compare_dirents); return dir; } SeafDir * seaf_fs_manager_get_seafdir_sorted_by_path (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id, const char *path) { SeafDir *dir = seaf_fs_manager_get_seafdir_by_path (mgr, repo_id, version, root_id, path, NULL); if (!dir) return NULL; /* Only some very old dir objects are not sorted. */ if (version > 0) return dir; if (!is_dirents_sorted (dir->entries)) dir->entries = g_list_sort (dir->entries, compare_dirents); return dir; } static int parse_metadata_type_v0 (const uint8_t *data, int len) { const uint8_t *ptr = data; if (len < sizeof(guint32)) return SEAF_METADATA_TYPE_INVALID; return (int)(get32bit(&ptr)); } static int parse_metadata_type_json (const char *obj_id, uint8_t *data, int len) { guint8 *decompressed; int outlen; json_t *object; json_error_t error; int type; if (seaf_decompress (data, len, &decompressed, &outlen) < 0) { seaf_warning ("Failed to decompress fs object %s.\n", obj_id); return SEAF_METADATA_TYPE_INVALID; } object = json_loadb ((const char *)decompressed, outlen, 0, &error); g_free (decompressed); if (!object) { if (error.text) seaf_warning ("Failed to load fs json object: %s.\n", error.text); else seaf_warning ("Failed to load fs json object.\n"); return SEAF_METADATA_TYPE_INVALID; } type = json_object_get_int_member (object, "type"); json_decref (object); return type; } int seaf_metadata_type_from_data (const char *obj_id, uint8_t *data, int len, gboolean is_json) { if (is_json) 
return parse_metadata_type_json (obj_id, data, len); else return parse_metadata_type_v0 (data, len); } SeafFSObject * fs_object_from_v0_data (const char *obj_id, const uint8_t *data, int len) { int type = parse_metadata_type_v0 (data, len); if (type == SEAF_METADATA_TYPE_FILE) return (SeafFSObject *)seafile_from_v0_data (obj_id, data, len); else if (type == SEAF_METADATA_TYPE_DIR) return (SeafFSObject *)seaf_dir_from_v0_data (obj_id, data, len); else { seaf_warning ("Invalid object type %d.\n", type); return NULL; } } SeafFSObject * fs_object_from_json (const char *obj_id, uint8_t *data, int len) { guint8 *decompressed; int outlen; json_t *object; json_error_t error; int type; SeafFSObject *fs_obj; if (seaf_decompress (data, len, &decompressed, &outlen) < 0) { seaf_warning ("Failed to decompress fs object %s.\n", obj_id); return NULL; } object = json_loadb ((const char *)decompressed, outlen, 0, &error); g_free (decompressed); if (!object) { if (error.text) seaf_warning ("Failed to load fs json object: %s.\n", error.text); else seaf_warning ("Failed to load fs json object.\n"); return NULL; } type = json_object_get_int_member (object, "type"); if (type == SEAF_METADATA_TYPE_FILE) fs_obj = (SeafFSObject *)seafile_from_json_object (obj_id, object); else if (type == SEAF_METADATA_TYPE_DIR) fs_obj = (SeafFSObject *)seaf_dir_from_json_object (obj_id, object); else { seaf_warning ("Invalid fs type %d.\n", type); json_decref (object); return NULL; } json_decref (object); return fs_obj; } SeafFSObject * seaf_fs_object_from_data (const char *obj_id, uint8_t *data, int len, gboolean is_json) { if (is_json) return fs_object_from_json (obj_id, data, len); else return fs_object_from_v0_data (obj_id, data, len); } void seaf_fs_object_free (SeafFSObject *obj) { if (!obj) return; if (obj->type == SEAF_METADATA_TYPE_FILE) seafile_unref ((Seafile *)obj); else if (obj->type == SEAF_METADATA_TYPE_DIR) seaf_dir_free ((SeafDir *)obj); } BlockList * block_list_new () { BlockList *bl = 
            g_new0 (BlockList, 1);
    /* The hash table owns its key strings and is used only for O(1)
     * membership tests; values are the same pointers as the keys. */
    bl->block_hash = g_hash_table_new_full (g_str_hash, g_str_equal,
                                            g_free, NULL);
    /* The array keeps its own copies of the ids, in insertion order. */
    bl->block_ids = g_ptr_array_new_with_free_func (g_free);
    return bl;
}

/* Free a block list together with all block id strings it owns. */
void
block_list_free (BlockList *bl)
{
    if (bl->block_hash)
        g_hash_table_destroy (bl->block_hash);
    g_ptr_array_free (bl->block_ids, TRUE);
    g_free (bl);
}

/* Append @block_id to @bl unless it is already present (set semantics).
 * Both the hash table and the array get their own copy of the id. */
void
block_list_insert (BlockList *bl, const char *block_id)
{
    if (g_hash_table_lookup (bl->block_hash, block_id))
        return;

    /* The same copy serves as both key and value in the hash table. */
    char *key = g_strdup(block_id);
    g_hash_table_replace (bl->block_hash, key, key);
    g_ptr_array_add (bl->block_ids, g_strdup(block_id));
    ++bl->n_blocks;
}

/* Return a newly allocated list containing the block ids that are in
 * @bl1 but not in @bl2. Neither input list is modified. */
BlockList *
block_list_difference (BlockList *bl1, BlockList *bl2)
{
    BlockList *bl;
    int i;
    char *block_id;
    char *key;

    bl = block_list_new ();

    for (i = 0; i < bl1->block_ids->len; ++i) {
        block_id = g_ptr_array_index (bl1->block_ids, i);
        if (g_hash_table_lookup (bl2->block_hash, block_id) == NULL) {
            key = g_strdup(block_id);
            g_hash_table_replace (bl->block_hash, key, key);
            g_ptr_array_add (bl->block_ids, g_strdup(block_id));
            ++bl->n_blocks;
        }
    }

    return bl;
}

/* Visit a single file object: invoke @callback unless the file is the
 * empty file. Returns -1 only if the callback fails and @skip_errors
 * is FALSE. */
static int
traverse_file (SeafFSManager *mgr,
               const char *repo_id,
               int version,
               const char *id,
               TraverseFSTreeCallback callback,
               void *user_data,
               gboolean skip_errors)
{
    gboolean stop = FALSE;

    if (memcmp (id, EMPTY_SHA1, 40) == 0)
        return 0;

    if (!callback (mgr, repo_id, version, id,
                   SEAF_METADATA_TYPE_FILE, user_data, &stop) &&
        !skip_errors)
        return -1;

    return 0;
}

/* Recursively visit directory @id, invoking @callback on the directory
 * itself first; the callback may set *stop to TRUE to prune this
 * subtree. With @skip_errors, failures are logged and traversal
 * continues. */
static int
traverse_dir (SeafFSManager *mgr,
              const char *repo_id,
              int version,
              const char *id,
              TraverseFSTreeCallback callback,
              void *user_data,
              gboolean skip_errors)
{
    SeafDir *dir;
    GList *p;
    SeafDirent *seaf_dent;
    gboolean stop = FALSE;

    if (!callback (mgr, repo_id, version, id,
                   SEAF_METADATA_TYPE_DIR, user_data, &stop) &&
        !skip_errors)
        return -1;

    if (stop)
        return 0;

    dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, id);
    if (!dir) {
        seaf_warning ("[fs-mgr]get seafdir %s failed\n", id);
        if (skip_errors)
            return 0;
        return -1;
    }

    for (p = dir->entries; p; p =
p->next) { seaf_dent = (SeafDirent *)p->data; if (S_ISREG(seaf_dent->mode)) { if (traverse_file (mgr, repo_id, version, seaf_dent->id, callback, user_data, skip_errors) < 0) { if (!skip_errors) { seaf_dir_free (dir); return -1; } } } else if (S_ISDIR(seaf_dent->mode)) { if (traverse_dir (mgr, repo_id, version, seaf_dent->id, callback, user_data, skip_errors) < 0) { if (!skip_errors) { seaf_dir_free (dir); return -1; } } } } seaf_dir_free (dir); return 0; } int seaf_fs_manager_traverse_tree (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id, TraverseFSTreeCallback callback, void *user_data, gboolean skip_errors) { if (strcmp (root_id, EMPTY_SHA1) == 0) { return 0; } return traverse_dir (mgr, repo_id, version, root_id, callback, user_data, skip_errors); } static int traverse_dir_path (SeafFSManager *mgr, const char *repo_id, int version, const char *dir_path, SeafDirent *dent, TraverseFSPathCallback callback, void *user_data) { SeafDir *dir; GList *p; SeafDirent *seaf_dent; gboolean stop = FALSE; char *sub_path; int ret = 0; if (!callback (mgr, dir_path, dent, user_data, &stop)) return -1; if (stop) return 0; dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, dent->id); if (!dir) { seaf_warning ("get seafdir %s:%s failed\n", repo_id, dent->id); return -1; } for (p = dir->entries; p; p = p->next) { seaf_dent = (SeafDirent *)p->data; sub_path = g_strconcat (dir_path, "/", seaf_dent->name, NULL); if (S_ISREG(seaf_dent->mode)) { if (!callback (mgr, sub_path, seaf_dent, user_data, &stop)) { g_free (sub_path); ret = -1; break; } } else if (S_ISDIR(seaf_dent->mode)) { if (traverse_dir_path (mgr, repo_id, version, sub_path, seaf_dent, callback, user_data) < 0) { g_free (sub_path); ret = -1; break; } } g_free (sub_path); } seaf_dir_free (dir); return ret; } int seaf_fs_manager_traverse_path (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id, const char *dir_path, TraverseFSPathCallback callback, void *user_data) { 
SeafDirent *dent; int ret = 0; dent = seaf_fs_manager_get_dirent_by_path (mgr, repo_id, version, root_id, dir_path, NULL); if (!dent) { seaf_warning ("Failed to get dirent for %.8s:%s.\n", repo_id, dir_path); return -1; } ret = traverse_dir_path (mgr, repo_id, version, dir_path, dent, callback, user_data); seaf_dirent_free (dent); return ret; } static gboolean fill_blocklist (SeafFSManager *mgr, const char *repo_id, int version, const char *obj_id, int type, void *user_data, gboolean *stop) { BlockList *bl = user_data; Seafile *seafile; int i; if (type == SEAF_METADATA_TYPE_FILE) { seafile = seaf_fs_manager_get_seafile (mgr, repo_id, version, obj_id); if (!seafile) { seaf_warning ("[fs mgr] Failed to find file %s.\n", obj_id); return FALSE; } for (i = 0; i < seafile->n_blocks; ++i) block_list_insert (bl, seafile->blk_sha1s[i]); seafile_unref (seafile); } return TRUE; } int seaf_fs_manager_populate_blocklist (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id, BlockList *bl) { return seaf_fs_manager_traverse_tree (mgr, repo_id, version, root_id, fill_blocklist, bl, FALSE); } gboolean seaf_fs_manager_object_exists (SeafFSManager *mgr, const char *repo_id, int version, const char *id) { /* Empty file and dir always exists. 
*/ if (memcmp (id, EMPTY_SHA1, 40) == 0) return TRUE; return seaf_obj_store_obj_exists (mgr->obj_store, repo_id, version, id); } void seaf_fs_manager_delete_object (SeafFSManager *mgr, const char *repo_id, int version, const char *id) { seaf_obj_store_delete_obj (mgr->obj_store, repo_id, version, id); } gint64 seaf_fs_manager_get_file_size (SeafFSManager *mgr, const char *repo_id, int version, const char *file_id) { Seafile *file; gint64 file_size; file = seaf_fs_manager_get_seafile (seaf->fs_mgr, repo_id, version, file_id); if (!file) { seaf_warning ("Couldn't get file %s:%s\n", repo_id, file_id); return -1; } file_size = file->file_size; seafile_unref (file); return file_size; } static gint64 get_dir_size (SeafFSManager *mgr, const char *repo_id, int version, const char *id) { SeafDir *dir; SeafDirent *seaf_dent; guint64 size = 0; gint64 result; GList *p; dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, id); if (!dir) return -1; for (p = dir->entries; p; p = p->next) { seaf_dent = (SeafDirent *)p->data; if (S_ISREG(seaf_dent->mode)) { if (dir->version > 0) result = seaf_dent->size; else { result = seaf_fs_manager_get_file_size (mgr, repo_id, version, seaf_dent->id); if (result < 0) { seaf_dir_free (dir); return result; } } size += result; } else if (S_ISDIR(seaf_dent->mode)) { result = get_dir_size (mgr, repo_id, version, seaf_dent->id); if (result < 0) { seaf_dir_free (dir); return result; } size += result; } } seaf_dir_free (dir); return size; } gint64 seaf_fs_manager_get_fs_size (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id) { if (strcmp (root_id, EMPTY_SHA1) == 0) return 0; return get_dir_size (mgr, repo_id, version, root_id); } static int count_dir_files (SeafFSManager *mgr, const char *repo_id, int version, const char *id) { SeafDir *dir; SeafDirent *seaf_dent; int count = 0; int result; GList *p; dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, id); if (!dir) return -1; for (p = dir->entries; p; p = p->next) { 
seaf_dent = (SeafDirent *)p->data; if (S_ISREG(seaf_dent->mode)) { count ++; } else if (S_ISDIR(seaf_dent->mode)) { result = count_dir_files (mgr, repo_id, version, seaf_dent->id); if (result < 0) { seaf_dir_free (dir); return result; } count += result; } } seaf_dir_free (dir); return count; } static int get_file_count_info (SeafFSManager *mgr, const char *repo_id, int version, const char *id, gint64 *dir_count, gint64 *file_count, gint64 *size) { SeafDir *dir; SeafDirent *seaf_dent; GList *p; int ret = 0; dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, id); if (!dir) return -1; for (p = dir->entries; p; p = p->next) { seaf_dent = (SeafDirent *)p->data; if (S_ISREG(seaf_dent->mode)) { (*file_count)++; if (version > 0) (*size) += seaf_dent->size; } else if (S_ISDIR(seaf_dent->mode)) { (*dir_count)++; ret = get_file_count_info (mgr, repo_id, version, seaf_dent->id, dir_count, file_count, size); } } seaf_dir_free (dir); return ret; } int seaf_fs_manager_count_fs_files (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id) { if (strcmp (root_id, EMPTY_SHA1) == 0) return 0; return count_dir_files (mgr, repo_id, version, root_id); } SeafDir * seaf_fs_manager_get_seafdir_by_path (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id, const char *path, GError **error) { SeafDir *dir; SeafDirent *dent; const char *dir_id = root_id; char *name, *saveptr; char *tmp_path = g_strdup(path); dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, dir_id); if (!dir) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_DIR_MISSING, "directory is missing"); g_free (tmp_path); return NULL; } name = strtok_r (tmp_path, "/", &saveptr); while (name != NULL) { GList *l; for (l = dir->entries; l != NULL; l = l->next) { dent = l->data; if (strcmp(dent->name, name) == 0 && S_ISDIR(dent->mode)) { dir_id = dent->id; break; } } if (!l) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_PATH_NO_EXIST, "Path does not exists %s", path); seaf_dir_free 
(dir); dir = NULL; break; } SeafDir *prev = dir; dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, dir_id); seaf_dir_free (prev); if (!dir) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_DIR_MISSING, "directory is missing"); break; } name = strtok_r (NULL, "/", &saveptr); } g_free (tmp_path); return dir; } char * seaf_fs_manager_path_to_obj_id (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id, const char *path, guint32 *mode, GError **error) { char *copy = g_strdup (path); int off = strlen(copy) - 1; char *slash, *name; SeafDir *base_dir = NULL; SeafDirent *dent; GList *p; char *obj_id = NULL; while (off >= 0 && copy[off] == '/') copy[off--] = 0; if (strlen(copy) == 0) { /* the path is root "/" */ if (mode) { *mode = S_IFDIR; } obj_id = g_strdup(root_id); goto out; } slash = strrchr (copy, '/'); if (!slash) { base_dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, root_id); if (!base_dir) { seaf_warning ("Failed to find root dir %s.\n", root_id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, " "); goto out; } name = copy; } else { *slash = 0; name = slash + 1; GError *tmp_error = NULL; base_dir = seaf_fs_manager_get_seafdir_by_path (mgr, repo_id, version, root_id, copy, &tmp_error); if (tmp_error && !g_error_matches(tmp_error, SEAFILE_DOMAIN, SEAF_ERR_PATH_NO_EXIST)) { seaf_warning ("Failed to get dir for %s.\n", copy); g_propagate_error (error, tmp_error); goto out; } /* The path doesn't exist in this commit. 
*/ if (!base_dir) { g_propagate_error (error, tmp_error); goto out; } } for (p = base_dir->entries; p != NULL; p = p->next) { dent = p->data; if (!is_object_id_valid (dent->id)) continue; if (strcmp (dent->name, name) == 0) { obj_id = g_strdup (dent->id); if (mode) { *mode = dent->mode; } break; } } out: if (base_dir) seaf_dir_free (base_dir); g_free (copy); return obj_id; } char * seaf_fs_manager_get_seafile_id_by_path (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id, const char *path, GError **error) { guint32 mode; char *file_id; file_id = seaf_fs_manager_path_to_obj_id (mgr, repo_id, version, root_id, path, &mode, error); if (!file_id) return NULL; if (file_id && S_ISDIR(mode)) { g_free (file_id); return NULL; } return file_id; } char * seaf_fs_manager_get_seafdir_id_by_path (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id, const char *path, GError **error) { guint32 mode = 0; char *dir_id; dir_id = seaf_fs_manager_path_to_obj_id (mgr, repo_id, version, root_id, path, &mode, error); if (!dir_id) return NULL; if (dir_id && !S_ISDIR(mode)) { g_free (dir_id); return NULL; } return dir_id; } SeafDirent * seaf_fs_manager_get_dirent_by_path (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id, const char *path, GError **error) { SeafDirent *dent = NULL; SeafDir *dir = NULL; char *parent_dir = NULL; char *file_name = NULL; parent_dir = g_path_get_dirname(path); file_name = g_path_get_basename(path); if (strcmp (parent_dir, ".") == 0) { dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, root_id); if (!dir) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_DIR_MISSING, "directory is missing"); } } else dir = seaf_fs_manager_get_seafdir_by_path (mgr, repo_id, version, root_id, parent_dir, error); if (!dir) { goto out; } GList *p; for (p = dir->entries; p; p = p->next) { SeafDirent *d = p->data; if (strcmp (d->name, file_name) == 0) { dent = seaf_dirent_dup(d); break; } } out: if (dir) 
seaf_dir_free (dir); g_free (parent_dir); g_free (file_name); return dent; } static gboolean verify_seafdir_v0 (const char *dir_id, const uint8_t *data, int len, gboolean verify_id) { guint32 meta_type; guint32 mode; char id[41]; guint32 name_len; char name[SEAF_DIR_NAME_LEN]; const uint8_t *ptr; int remain; int dirent_base_size; SHA_CTX ctx; uint8_t sha1[20]; char check_id[41]; if (len < sizeof(SeafdirOndisk)) { seaf_warning ("[fs mgr] Corrupt seafdir object %s.\n", dir_id); return FALSE; } ptr = data; remain = len; meta_type = get32bit (&ptr); remain -= 4; if (meta_type != SEAF_METADATA_TYPE_DIR) { seaf_warning ("Data does not contain a directory.\n"); return FALSE; } if (verify_id) SHA1_Init (&ctx); dirent_base_size = 2 * sizeof(guint32) + 40; while (remain > dirent_base_size) { mode = get32bit (&ptr); memcpy (id, ptr, 40); id[40] = '\0'; ptr += 40; name_len = get32bit (&ptr); remain -= dirent_base_size; if (remain >= name_len) { name_len = MIN (name_len, SEAF_DIR_NAME_LEN - 1); memcpy (name, ptr, name_len); ptr += name_len; remain -= name_len; } else { seaf_warning ("Bad data format for dir objcet %s.\n", dir_id); return FALSE; } if (verify_id) { /* Convert mode to little endian before compute. 
 */
            if (G_BYTE_ORDER == G_BIG_ENDIAN)
                mode = GUINT32_SWAP_LE_BE (mode);
            SHA1_Update (&ctx, id, 40);
            SHA1_Update (&ctx, name, name_len);
            SHA1_Update (&ctx, &mode, sizeof(mode));
        }
    }

    if (!verify_id)
        return TRUE;

    /* Compare the recomputed content hash against the stored object id. */
    SHA1_Final (sha1, &ctx);
    rawdata_to_hex (sha1, check_id, 20);
    if (strcmp (check_id, dir_id) == 0)
        return TRUE;
    else
        return FALSE;
}

/* Verify a compressed JSON fs object by decompressing it and checking
 * that the SHA1 of the decompressed content matches @obj_id. */
static gboolean
verify_fs_object_json (const char *obj_id, uint8_t *data, int len)
{
    guint8 *decompressed;
    int outlen;
    unsigned char sha1[20];
    char hex[41];

    if (seaf_decompress (data, len, &decompressed, &outlen) < 0) {
        seaf_warning ("Failed to decompress fs object %s.\n", obj_id);
        return FALSE;
    }

    calculate_sha1 (sha1, (const char *)decompressed, outlen);
    rawdata_to_hex (sha1, hex, 20);
    g_free (decompressed);

    return (strcmp(hex, obj_id) == 0);
}

/* Dispatch dir verification on the on-disk format: compressed JSON for
 * version > 0 objects, the legacy v0 binary layout otherwise. */
static gboolean
verify_seafdir (const char *dir_id, uint8_t *data, int len,
                gboolean verify_id, gboolean is_json)
{
    if (is_json)
        return verify_fs_object_json (dir_id, data, len);
    else
        return verify_seafdir_v0 (dir_id, data, len, verify_id);
}

/* Read dir @dir_id from the object store and verify its integrity.
 * On read failure *io_error is set to TRUE so the caller can tell I/O
 * problems apart from corruption. The empty dir always verifies. */
gboolean
seaf_fs_manager_verify_seafdir (SeafFSManager *mgr,
                                const char *repo_id,
                                int version,
                                const char *dir_id,
                                gboolean verify_id,
                                gboolean *io_error)
{
    void *data;
    int len;

    if (memcmp (dir_id, EMPTY_SHA1, 40) == 0) {
        return TRUE;
    }

    if (seaf_obj_store_read_obj (mgr->obj_store, repo_id, version,
                                 dir_id, &data, &len) < 0) {
        seaf_warning ("[fs mgr] Failed to read dir %s:%s.\n",
                      repo_id, dir_id);
        *io_error = TRUE;
        return FALSE;
    }

    gboolean ret = verify_seafdir (dir_id, data, len, verify_id,
                                   (version > 0));
    g_free (data);
    return ret;
}

/* Verify a legacy v0 seafile object: check the type field in the
 * header and, if @verify_id, recompute the object id from the block
 * id list. */
static gboolean
verify_seafile_v0 (const char *id, const void *data, int len,
                   gboolean verify_id)
{
    const SeafileOndisk *ondisk = data;
    SHA_CTX ctx;
    uint8_t sha1[20];
    char check_id[41];

    if (len < sizeof(SeafileOndisk)) {
        seaf_warning ("[fs mgr] Corrupt seafile object %s.\n", id);
        return FALSE;
    }

    /* The on-disk type field is stored big-endian. */
    if (ntohl(ondisk->type) != SEAF_METADATA_TYPE_FILE) {
        seaf_warning ("[fd mgr] %s is not a file.\n", id);
        return FALSE;
    }

    int
id_list_length = len - sizeof(SeafileOndisk); if (id_list_length % 20 != 0) { seaf_warning ("[fs mgr] Bad seafile id list length %d.\n", id_list_length); return FALSE; } if (!verify_id) return TRUE; SHA1_Init (&ctx); SHA1_Update (&ctx, ondisk->block_ids, len - sizeof(SeafileOndisk)); SHA1_Final (sha1, &ctx); rawdata_to_hex (sha1, check_id, 20); if (strcmp (check_id, id) == 0) return TRUE; else return FALSE; } static gboolean verify_seafile (const char *id, void *data, int len, gboolean verify_id, gboolean is_json) { if (is_json) return verify_fs_object_json (id, data, len); else return verify_seafile_v0 (id, data, len, verify_id); } gboolean seaf_fs_manager_verify_seafile (SeafFSManager *mgr, const char *repo_id, int version, const char *file_id, gboolean verify_id, gboolean *io_error) { void *data; int len; if (memcmp (file_id, EMPTY_SHA1, 40) == 0) { return TRUE; } if (seaf_obj_store_read_obj (mgr->obj_store, repo_id, version, file_id, &data, &len) < 0) { seaf_warning ("[fs mgr] Failed to read file %s:%s.\n", repo_id, file_id); *io_error = TRUE; return FALSE; } gboolean ret = verify_seafile (file_id, data, len, verify_id, (version > 0)); g_free (data); return ret; } static gboolean verify_fs_object_v0 (const char *obj_id, uint8_t *data, int len, gboolean verify_id) { gboolean ret = TRUE; int type = seaf_metadata_type_from_data (obj_id, data, len, FALSE); switch (type) { case SEAF_METADATA_TYPE_FILE: ret = verify_seafile_v0 (obj_id, data, len, verify_id); break; case SEAF_METADATA_TYPE_DIR: ret = verify_seafdir_v0 (obj_id, data, len, verify_id); break; default: seaf_warning ("Invalid meta data type: %d.\n", type); return FALSE; } return ret; } gboolean seaf_fs_manager_verify_object (SeafFSManager *mgr, const char *repo_id, int version, const char *obj_id, gboolean verify_id, gboolean *io_error) { void *data; int len; gboolean ret = TRUE; if (memcmp (obj_id, EMPTY_SHA1, 40) == 0) { return TRUE; } if (seaf_obj_store_read_obj (mgr->obj_store, repo_id, version, 
obj_id, &data, &len) < 0) { seaf_warning ("[fs mgr] Failed to read object %s:%s.\n", repo_id, obj_id); *io_error = TRUE; return FALSE; } if (version == 0) ret = verify_fs_object_v0 (obj_id, data, len, verify_id); else ret = verify_fs_object_json (obj_id, data, len); g_free (data); return ret; } int dir_version_from_repo_version (int repo_version) { if (repo_version == 0) return 0; else return CURRENT_DIR_OBJ_VERSION; } int seafile_version_from_repo_version (int repo_version) { if (repo_version == 0) return 0; else return CURRENT_SEAFILE_OBJ_VERSION; } int seaf_fs_manager_remove_store (SeafFSManager *mgr, const char *store_id) { return seaf_obj_store_remove_store (mgr->obj_store, store_id); } GObject * seaf_fs_manager_get_file_count_info_by_path (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id, const char *path, GError **error) { char *dir_id = NULL; gint64 file_count = 0, dir_count = 0, size = 0; SeafileFileCountInfo *info = NULL; dir_id = seaf_fs_manager_get_seafdir_id_by_path (mgr, repo_id, version, root_id, path, NULL); if (!dir_id) { seaf_warning ("Path %s doesn't exist or is not a dir in repo %.10s.\n", path, repo_id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad path"); goto out; } if (get_file_count_info (mgr, repo_id, version, dir_id, &dir_count, &file_count, &size) < 0) { seaf_warning ("Failed to get count info from path %s in repo %.10s.\n", path, repo_id); goto out; } info = g_object_new (SEAFILE_TYPE_FILE_COUNT_INFO, "file_count", file_count, "dir_count", dir_count, "size", size, NULL); out: g_free (dir_id); return (GObject *)info; } static int search_files_recursive (SeafFSManager *mgr, const char *repo_id, const char *path, const char *id, const char *str, int version, GList **file_list) { SeafDir *dir; GList *p; SeafDirent *seaf_dent; int ret = 0; char *full_path = NULL; dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, id); if (!dir) { seaf_warning ("[fs-mgr]get seafdir %s failed\n", id); return -1; } 
for (p = dir->entries; p; p = p->next) { seaf_dent = (SeafDirent *)p->data; full_path = g_strconcat (path, "/", seaf_dent->name, NULL); if (seaf_dent->name && strcasestr (seaf_dent->name, str) != NULL) { SearchResult *sr = g_new0(SearchResult, 1); sr->path = g_strdup (full_path); sr->size = seaf_dent->size; sr->mtime = seaf_dent->mtime; *file_list = g_list_prepend (*file_list, sr); if (S_ISDIR(seaf_dent->mode)) { sr->is_dir = TRUE; } } if (S_ISDIR(seaf_dent->mode)) { if (search_files_recursive (mgr, repo_id, full_path, seaf_dent->id, str, version, file_list) < 0) { g_free (full_path); ret = -1; break; } } g_free (full_path); } seaf_dir_free (dir); return ret; } GList * seaf_fs_manager_search_files_by_path (SeafFSManager *mgr, const char *repo_id, const char *path, const char *str) { GList *file_list = NULL; SeafCommit *head = NULL; SeafRepo *repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { seaf_warning ("Failed to find repo %s\n", repo_id); goto out; } head = seaf_commit_manager_get_commit (seaf->commit_mgr,repo->id, repo->version, repo->head->commit_id); if (!head) { seaf_warning ("Failed to find commit %s\n", repo->head->commit_id); goto out; } if (!path || g_strcmp0 (path, "/") == 0) { search_files_recursive (mgr, repo->store_id, "", head->root_id, str, repo->version, &file_list); } else { char *dir_id = seaf_fs_manager_get_seafdir_id_by_path (mgr, repo->store_id, repo->version, head->root_id, path, NULL); if (!dir_id) { seaf_warning ("Path %s doesn't exist or is not a dir in repo %.10s.\n", path, repo->store_id); goto out; } search_files_recursive (mgr, repo->store_id, path, dir_id, str, repo->version, &file_list); g_free (dir_id); } out: seaf_repo_unref (repo); seaf_commit_unref (head); return file_list; } ================================================ FILE: common/fs-mgr.h ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef SEAF_FILE_MGR_H #define 
SEAF_FILE_MGR_H #include #include "seafile-object.h" #include "obj-store.h" #include "cdc/cdc.h" #include "../common/seafile-crypt.h" #define CURRENT_DIR_OBJ_VERSION 1 #define CURRENT_SEAFILE_OBJ_VERSION 1 typedef struct _SeafFSManager SeafFSManager; typedef struct _SeafFSObject SeafFSObject; typedef struct _Seafile Seafile; typedef struct _SeafDir SeafDir; typedef struct _SeafDirent SeafDirent; typedef enum { SEAF_METADATA_TYPE_INVALID, SEAF_METADATA_TYPE_FILE, SEAF_METADATA_TYPE_LINK, SEAF_METADATA_TYPE_DIR, } SeafMetadataType; /* Common to seafile and seafdir objects. */ struct _SeafFSObject { int type; }; struct _Seafile { SeafFSObject object; int version; char file_id[41]; guint64 file_size; guint32 n_blocks; char **blk_sha1s; int ref_count; }; typedef struct SearchResult { char *path; gint64 size; gint64 mtime; gboolean is_dir; } SearchResult; void seafile_ref (Seafile *seafile); void seafile_unref (Seafile *seafile); int seafile_save (SeafFSManager *fs_mgr, const char *repo_id, int version, Seafile *file); #define SEAF_DIR_NAME_LEN 256 struct _SeafDirent { int version; guint32 mode; char id[41]; guint32 name_len; char *name; /* attributes for version > 0 */ gint64 mtime; char *modifier; /* for files only */ gint64 size; /* for files only */ }; struct _SeafDir { SeafFSObject object; int version; char dir_id[41]; GList *entries; /* data in on-disk format. 
*/ void *ondisk; int ondisk_size; }; SeafDir * seaf_dir_new (const char *id, GList *entries, int version); void seaf_dir_free (SeafDir *dir); SeafDir * seaf_dir_from_data (const char *dir_id, uint8_t *data, int len, gboolean is_json); void * seaf_dir_to_data (SeafDir *dir, int *len); int seaf_dir_save (SeafFSManager *fs_mgr, const char *repo_id, int version, SeafDir *dir); SeafDirent * seaf_dirent_new (int version, const char *sha1, int mode, const char *name, gint64 mtime, const char *modifier, gint64 size); void seaf_dirent_free (SeafDirent *dent); SeafDirent * seaf_dirent_dup (SeafDirent *dent); int seaf_metadata_type_from_data (const char *obj_id, uint8_t *data, int len, gboolean is_json); /* Parse an fs object without knowing its type. */ SeafFSObject * seaf_fs_object_from_data (const char *obj_id, uint8_t *data, int len, gboolean is_json); void seaf_fs_object_free (SeafFSObject *obj); typedef struct { /* TODO: GHashTable may be inefficient when we have large number of IDs. */ GHashTable *block_hash; GPtrArray *block_ids; uint32_t n_blocks; uint32_t n_valid_blocks; } BlockList; BlockList * block_list_new (); void block_list_free (BlockList *bl); void block_list_insert (BlockList *bl, const char *block_id); /* Return a blocklist containing block ids which are in @bl1 but * not in @bl2. 
*/ BlockList * block_list_difference (BlockList *bl1, BlockList *bl2); struct _SeafileSession; typedef struct _SeafFSManagerPriv SeafFSManagerPriv; struct _SeafFSManager { struct _SeafileSession *seaf; struct SeafObjStore *obj_store; SeafFSManagerPriv *priv; }; SeafFSManager * seaf_fs_manager_new (struct _SeafileSession *seaf, const char *seaf_dir); int seaf_fs_manager_init (SeafFSManager *mgr); #ifndef SEAFILE_SERVER int seaf_fs_manager_checkout_file (SeafFSManager *mgr, const char *repo_id, int version, const char *file_id, const char *file_path, guint32 mode, guint64 mtime, struct SeafileCrypt *crypt, const char *in_repo_path, const char *conflict_head_id, gboolean force_conflict, gboolean *conflicted, const char *email); #endif /* not SEAFILE_SERVER */ /** * Check in blocks and create seafile/symlink object. * Returns sha1 id for the seafile/symlink object in @sha1 parameter. */ int seaf_fs_manager_index_file_blocks (SeafFSManager *mgr, const char *repo_id, int version, GList *paths, GList *blockids, unsigned char sha1[], gint64 file_size); int seaf_fs_manager_index_raw_blocks (SeafFSManager *mgr, const char *repo_id, int version, GList *paths, GList *blockids); int seaf_fs_manager_index_existed_file_blocks (SeafFSManager *mgr, const char *repo_id, int version, GList *blockids, unsigned char sha1[], gint64 file_size); int seaf_fs_manager_index_blocks (SeafFSManager *mgr, const char *repo_id, int version, const char *file_path, unsigned char sha1[], gint64 *size, SeafileCrypt *crypt, gboolean write_data, gboolean use_cdc, gint64 *indexed); Seafile * seaf_fs_manager_get_seafile (SeafFSManager *mgr, const char *repo_id, int version, const char *file_id); SeafDir * seaf_fs_manager_get_seafdir (SeafFSManager *mgr, const char *repo_id, int version, const char *dir_id); /* Make sure entries in the returned dir is sorted in descending order. 
*/ SeafDir * seaf_fs_manager_get_seafdir_sorted (SeafFSManager *mgr, const char *repo_id, int version, const char *dir_id); SeafDir * seaf_fs_manager_get_seafdir_sorted_by_path (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id, const char *path); int seaf_fs_manager_populate_blocklist (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id, BlockList *bl); /* * For dir object, set *stop to TRUE to stop traversing the subtree. */ typedef gboolean (*TraverseFSTreeCallback) (SeafFSManager *mgr, const char *repo_id, int version, const char *obj_id, int type, void *user_data, gboolean *stop); int seaf_fs_manager_traverse_tree (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id, TraverseFSTreeCallback callback, void *user_data, gboolean skip_errors); typedef gboolean (*TraverseFSPathCallback) (SeafFSManager *mgr, const char *path, SeafDirent *dent, void *user_data, gboolean *stop); int seaf_fs_manager_traverse_path (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id, const char *dir_path, TraverseFSPathCallback callback, void *user_data); gboolean seaf_fs_manager_object_exists (SeafFSManager *mgr, const char *repo_id, int version, const char *id); void seaf_fs_manager_delete_object (SeafFSManager *mgr, const char *repo_id, int version, const char *id); gint64 seaf_fs_manager_get_file_size (SeafFSManager *mgr, const char *repo_id, int version, const char *file_id); gint64 seaf_fs_manager_get_fs_size (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id); #ifndef SEAFILE_SERVER int seafile_write_chunk (const char *repo_id, int version, CDCDescriptor *chunk, SeafileCrypt *crypt, uint8_t *checksum, gboolean write_data); int seafile_check_write_chunk (CDCDescriptor *chunk, uint8_t *sha1, gboolean write_data); #endif /* SEAFILE_SERVER */ uint32_t calculate_chunk_size (uint64_t total_size); int seaf_fs_manager_count_fs_files (SeafFSManager *mgr, const char *repo_id, 
int version, const char *root_id); SeafDir * seaf_fs_manager_get_seafdir_by_path(SeafFSManager *mgr, const char *repo_id, int version, const char *root_id, const char *path, GError **error); char * seaf_fs_manager_get_seafile_id_by_path (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id, const char *path, GError **error); char * seaf_fs_manager_path_to_obj_id (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id, const char *path, guint32 *mode, GError **error); char * seaf_fs_manager_get_seafdir_id_by_path (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id, const char *path, GError **error); SeafDirent * seaf_fs_manager_get_dirent_by_path (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id, const char *path, GError **error); /* Check object integrity. */ gboolean seaf_fs_manager_verify_seafdir (SeafFSManager *mgr, const char *repo_id, int version, const char *dir_id, gboolean verify_id, gboolean *io_error); gboolean seaf_fs_manager_verify_seafile (SeafFSManager *mgr, const char *repo_id, int version, const char *file_id, gboolean verify_id, gboolean *io_error); gboolean seaf_fs_manager_verify_object (SeafFSManager *mgr, const char *repo_id, int version, const char *obj_id, gboolean verify_id, gboolean *io_error); int dir_version_from_repo_version (int repo_version); int seafile_version_from_repo_version (int repo_version); struct _CDCFileDescriptor; void seaf_fs_manager_calculate_seafile_id_json (int repo_version, struct _CDCFileDescriptor *cdc, guint8 *file_id_sha1); int seaf_fs_manager_remove_store (SeafFSManager *mgr, const char *store_id); GObject * seaf_fs_manager_get_file_count_info_by_path (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id, const char *path, GError **error); GList * seaf_fs_manager_search_files_by_path (SeafFSManager *mgr, const char *repo_id, const char *path, const char *str); #endif 
================================================ FILE: common/group-mgr.c ================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

#include "common.h"
#include "seafile-session.h"
#include "seaf-db.h"
#include "group-mgr.h"
#include "org-mgr.h"
#include "seaf-utils.h"
#include "utils.h"
#include "log.h"

/* Connection pool size for the private sqlite group database. */
#define DEFAULT_MAX_CONNECTIONS 100

struct _CcnetGroupManagerPriv {
    /* Database holding all group tables: a private sqlite db for sqlite
     * deployments, otherwise the shared ccnet db (see open_db below). */
    CcnetDB *db;
    /* Name of the main group table; overridable via the
     * SEAFILE_MYSQL_DB_GROUP_TABLE_NAME environment variable. */
    const char *table_name;
};

static int open_db (CcnetGroupManager *manager);
static int check_db_table (CcnetGroupManager *manager, CcnetDB *db);

/* Allocate a group manager bound to @session.  No database work is done
 * here; call ccnet_group_manager_prepare() before using the manager. */
CcnetGroupManager* ccnet_group_manager_new (SeafileSession *session)
{
    CcnetGroupManager *manager = g_new0 (CcnetGroupManager, 1);

    manager->session = session;
    manager->priv = g_new0 (CcnetGroupManagerPriv, 1);

    return manager;
}

int ccnet_group_manager_init (CcnetGroupManager *manager)
{
    return 0;
}

/* Resolve the group table name (env override, default "Group") and open
 * the backing database.  Returns 0 on success, -1 on failure.
 * NOTE(review): table_name is strdup'ed into a const pointer and never
 * freed — acceptable only because the manager lives for the whole
 * process; confirm if that ever changes. */
int ccnet_group_manager_prepare (CcnetGroupManager *manager)
{
    const char *table_name = g_getenv("SEAFILE_MYSQL_DB_GROUP_TABLE_NAME");
    if (!table_name || g_strcmp0 (table_name, "") == 0)
        manager->priv->table_name = g_strdup ("Group");
    else
        manager->priv->table_name = g_strdup (table_name);
    return open_db(manager);
}

void ccnet_group_manager_start (CcnetGroupManager *manager)
{
}

/* Open (creating the directory if necessary) the private sqlite database
 * at <ccnet_dir>/GroupMgr/groupmgr.db.  Returns NULL on error. */
static CcnetDB *
open_sqlite_db (CcnetGroupManager *manager)
{
    CcnetDB *db = NULL;
    char *db_dir;
    char *db_path;

    db_dir = g_build_filename (manager->session->ccnet_dir, "GroupMgr", NULL);
    if (checkdir_with_mkdir(db_dir) < 0) {
        ccnet_error ("Cannot open db dir %s: %s\n", db_dir,
                     strerror(errno));
        g_free (db_dir);
        return NULL;
    }
    g_free (db_dir);

    db_path = g_build_filename (manager->session->ccnet_dir, "GroupMgr",
                                "groupmgr.db", NULL);
    db = seaf_db_new_sqlite (db_path, DEFAULT_MAX_CONNECTIONS);
    g_free (db_path);

    return db;
}

/* Select the database that backs group storage and make sure the tables
 * exist.  Returns 0 on success, -1 on failure. */
static int
open_db (CcnetGroupManager *manager)
{
    CcnetDB *db = NULL;

    switch (seaf_db_type(manager->session->ccnet_db)) {
    case SEAF_DB_TYPE_SQLITE:
        db = open_sqlite_db (manager);
        break;
case SEAF_DB_TYPE_PGSQL: case SEAF_DB_TYPE_MYSQL: db = manager->session->ccnet_db; break; } if (!db) return -1; manager->priv->db = db; if ((manager->session->ccnet_create_tables || seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) && check_db_table (manager, db) < 0) { ccnet_warning ("Failed to create group db tables.\n"); return -1; } return 0; } /* -------- Group Database Management ---------------- */ static int check_db_table (CcnetGroupManager *manager, CcnetDB *db) { char *sql; GString *group_sql = g_string_new (""); const char *table_name = manager->priv->table_name; int db_type = seaf_db_type (db); if (db_type == SEAF_DB_TYPE_MYSQL) { g_string_printf (group_sql, "CREATE TABLE IF NOT EXISTS `%s` (`group_id` BIGINT " " PRIMARY KEY AUTO_INCREMENT, `group_name` VARCHAR(255)," " `creator_name` VARCHAR(255), `timestamp` BIGINT," " `type` VARCHAR(32), `parent_group_id` INTEGER)" "ENGINE=INNODB", table_name); if (seaf_db_query (db, group_sql->str) < 0) { g_string_free (group_sql, TRUE); return -1; } sql = "CREATE TABLE IF NOT EXISTS `GroupUser` ( " "`id` BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, `group_id` BIGINT," " `user_name` VARCHAR(255), `is_staff` tinyint, UNIQUE INDEX" " (`group_id`, `user_name`), INDEX (`user_name`))" "ENGINE=INNODB"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS GroupDNPair ( " "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, group_id INTEGER," " dn VARCHAR(255))ENGINE=INNODB"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS GroupStructure ( " "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, group_id INTEGER, " "path VARCHAR(1024), UNIQUE INDEX(group_id))ENGINE=INNODB"; if (seaf_db_query (db, sql) < 0) return -1; } else if (db_type == SEAF_DB_TYPE_SQLITE) { g_string_printf (group_sql, "CREATE TABLE IF NOT EXISTS `%s` (`group_id` INTEGER" " PRIMARY KEY AUTOINCREMENT, `group_name` VARCHAR(255)," " `creator_name` VARCHAR(255), `timestamp` BIGINT," " `type` VARCHAR(32), `parent_group_id` 
INTEGER)", table_name); if (seaf_db_query (db, group_sql->str) < 0) { g_string_free (group_sql, TRUE); return -1; } sql = "CREATE TABLE IF NOT EXISTS `GroupUser` (`group_id` INTEGER, " "`user_name` VARCHAR(255), `is_staff` tinyint)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE UNIQUE INDEX IF NOT EXISTS groupid_username_indx on " "`GroupUser` (`group_id`, `user_name`)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE INDEX IF NOT EXISTS username_indx on " "`GroupUser` (`user_name`)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS GroupDNPair (group_id INTEGER," " dn VARCHAR(255))"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS GroupStructure (group_id INTEGER PRIMARY KEY, " "path VARCHAR(1024))"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE INDEX IF NOT EXISTS path_indx on " "`GroupStructure` (`path`)"; if (seaf_db_query (db, sql) < 0) return -1; } else if (db_type == SEAF_DB_TYPE_PGSQL) { g_string_printf (group_sql, "CREATE TABLE IF NOT EXISTS \"%s\" (group_id SERIAL" " PRIMARY KEY, group_name VARCHAR(255)," " creator_name VARCHAR(255), timestamp BIGINT," " type VARCHAR(32), parent_group_id INTEGER)", table_name); if (seaf_db_query (db, group_sql->str) < 0) { g_string_free (group_sql, TRUE); return -1; } sql = "CREATE TABLE IF NOT EXISTS GroupUser (group_id INTEGER," " user_name VARCHAR(255), is_staff smallint, UNIQUE " " (group_id, user_name))"; if (seaf_db_query (db, sql) < 0) return -1; //if (!pgsql_index_exists (db, "groupuser_username_idx")) { // sql = "CREATE INDEX groupuser_username_idx ON GroupUser (user_name)"; // if (seaf_db_query (db, sql) < 0) // return -1; //} sql = "CREATE TABLE IF NOT EXISTS GroupDNPair (group_id INTEGER," " dn VARCHAR(255))"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS GroupStructure (group_id INTEGER PRIMARY KEY, " "path VARCHAR(1024))"; if (seaf_db_query (db, sql) < 0) return -1; //if 
(!pgsql_index_exists (db, "structure_path_idx")) { // sql = "CREATE INDEX structure_path_idx ON GroupStructure (path)"; // if (seaf_db_query (db, sql) < 0) // return -1; //} } g_string_free (group_sql, TRUE); return 0; } static gboolean get_group_id_cb (CcnetDBRow *row, void *data) { int *id = data; int group_id = seaf_db_row_get_column_int(row, 0); *id = group_id; return FALSE; } static gboolean get_group_path_cb (CcnetDBRow *row, void *data) { char **path = (char **)data; const char *group_path = seaf_db_row_get_column_text (row, 0); *path = g_strdup (group_path); return FALSE; } static int create_group_common (CcnetGroupManager *mgr, const char *group_name, const char *user_name, int parent_group_id, GError **error) { CcnetDB *db = mgr->priv->db; gint64 now = get_current_time(); GString *sql = g_string_new (""); const char *table_name = mgr->priv->table_name; int group_id = -1; CcnetDBTrans *trans = seaf_db_begin_transaction (db); char *user_name_l = g_ascii_strdown (user_name, -1); if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) g_string_printf (sql, "INSERT INTO \"%s\"(group_name, " "creator_name, timestamp, parent_group_id) VALUES(?, ?, ?, ?)", table_name); else g_string_printf (sql, "INSERT INTO `%s`(group_name, " "creator_name, timestamp, parent_group_id) VALUES(?, ?, ?, ?)", table_name); if (seaf_db_trans_query (trans, sql->str, 4, "string", group_name, "string", user_name_l, "int64", now, "int", parent_group_id) < 0) goto error; if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) g_string_printf (sql, "SELECT group_id FROM \"%s\" WHERE " "group_name = ? AND creator_name = ? " "AND timestamp = ?", table_name); else g_string_printf (sql, "SELECT group_id FROM `%s` WHERE " "group_name = ? AND creator_name = ? 
" "AND timestamp = ?", table_name); seaf_db_trans_foreach_selected_row (trans, sql->str, get_group_id_cb, &group_id, 3, "string", group_name, "string", user_name_l, "int64", now); if (group_id < 0) goto error; if (g_strcmp0(user_name, "system admin") != 0) { g_string_printf (sql, "INSERT INTO GroupUser (group_id, user_name, is_staff) VALUES (?, ?, ?)"); if (seaf_db_trans_query (trans, sql->str, 3, "int", group_id, "string", user_name_l, "int", 1) < 0) goto error; } if (parent_group_id == -1) { g_string_printf (sql, "INSERT INTO GroupStructure (group_id, path) VALUES (?,'%d')", group_id); if (seaf_db_trans_query (trans, sql->str, 1, "int", group_id) < 0) goto error; } else if (parent_group_id > 0) { g_string_printf (sql, "SELECT path FROM GroupStructure WHERE group_id=?"); char *path = NULL; seaf_db_trans_foreach_selected_row (trans, sql->str, get_group_path_cb, &path, 1, "int", parent_group_id); if (!path) goto error; g_string_printf (sql, "INSERT INTO GroupStructure (group_id, path) VALUES (?, '%s, %d')", path, group_id); if (seaf_db_trans_query (trans, sql->str, 1, "int", group_id) < 0) { g_free (path); goto error; } g_free (path); } seaf_db_commit (trans); seaf_db_trans_close (trans); g_string_free (sql, TRUE); g_free (user_name_l); return group_id; error: seaf_db_rollback (trans); seaf_db_trans_close (trans); g_set_error (error, CCNET_DOMAIN, 0, "Failed to create group"); g_string_free (sql, TRUE); g_free (user_name_l); return -1; } int ccnet_group_manager_create_group (CcnetGroupManager *mgr, const char *group_name, const char *user_name, int parent_group_id, GError **error) { return create_group_common (mgr, group_name, user_name, parent_group_id, error); } /* static gboolean */ /* duplicate_org_group_name (CcnetGroupManager *mgr, */ /* int org_id, */ /* const char *group_name) */ /* { */ /* GList *org_groups = NULL, *ptr; */ /* CcnetOrgManager *org_mgr = seaf->org_mgr; */ /* org_groups = ccnet_org_manager_get_org_groups (org_mgr, org_id, -1, -1); */ /* if 
(!org_groups) */ /* return FALSE; */ /* for (ptr = org_groups; ptr; ptr = ptr->next) { */ /* int group_id = (int)(long)ptr->data; */ /* CcnetGroup *group = ccnet_group_manager_get_group (mgr, group_id, */ /* NULL); */ /* if (!group) */ /* continue; */ /* if (g_strcmp0 (group_name, ccnet_group_get_group_name(group)) == 0) { */ /* g_list_free (org_groups); */ /* g_object_unref (group); */ /* return TRUE; */ /* } else { */ /* g_object_unref (group); */ /* } */ /* } */ /* g_list_free (org_groups); */ /* return FALSE; */ /* } */ int ccnet_group_manager_create_org_group (CcnetGroupManager *mgr, int org_id, const char *group_name, const char *user_name, int parent_group_id, GError **error) { CcnetOrgManager *org_mgr = seaf->org_mgr; /* if (duplicate_org_group_name (mgr, org_id, group_name)) { */ /* g_set_error (error, CCNET_DOMAIN, 0, */ /* "The group has already created in this org."); */ /* return -1; */ /* } */ int group_id = create_group_common (mgr, group_name, user_name, parent_group_id, error); if (group_id < 0) { g_set_error (error, CCNET_DOMAIN, 0, "Failed to create org group."); return -1; } if (ccnet_org_manager_add_org_group (org_mgr, org_id, group_id, error) < 0) { g_set_error (error, CCNET_DOMAIN, 0, "Failed to create org group."); return -1; } return group_id; } static gboolean check_group_staff (CcnetDB *db, int group_id, const char *user_name, gboolean in_structure) { gboolean exists, err; if (!in_structure) { exists = seaf_db_statement_exists (db, "SELECT group_id FROM GroupUser WHERE " "group_id = ? AND user_name = ? 
AND " "is_staff = 1", &err, 2, "int", group_id, "string", user_name); if (err) { ccnet_warning ("DB error when check staff user exist in GroupUser.\n"); return FALSE; } return exists; } GString *sql = g_string_new(""); g_string_printf (sql, "SELECT path FROM GroupStructure WHERE group_id=?"); char *path = seaf_db_statement_get_string (db, sql->str, 1, "int", group_id); if (!path) { exists = seaf_db_statement_exists (db, "SELECT group_id FROM GroupUser WHERE " "group_id = ? AND user_name = ? AND " "is_staff = 1", &err, 2, "int", group_id, "string", user_name); } else { g_string_printf (sql, "SELECT group_id FROM GroupUser WHERE " "group_id IN (%s) AND user_name = ? AND " "is_staff = 1", path); exists = seaf_db_statement_exists (db, sql->str, &err, 1, "string", user_name); } g_string_free (sql, TRUE); g_free (path); if (err) { ccnet_warning ("DB error when check staff user exist in GroupUser.\n"); return FALSE; } return exists; } int ccnet_group_manager_remove_group (CcnetGroupManager *mgr, int group_id, gboolean remove_anyway, GError **error) { CcnetDB *db = mgr->priv->db; GString *sql = g_string_new (""); gboolean exists, err; const char *table_name = mgr->priv->table_name; /* No permission check here, since both group staff and seahub staff * can remove group. 
*/ if (remove_anyway != TRUE) { if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) g_string_printf (sql, "SELECT 1 FROM \"%s\" WHERE parent_group_id=?", table_name); else g_string_printf (sql, "SELECT 1 FROM `%s` WHERE parent_group_id=?", table_name); exists = seaf_db_statement_exists (db, sql->str, &err, 1, "int", group_id); if (err) { ccnet_warning ("DB error when check remove group.\n"); g_string_free (sql, TRUE); return -1; } if (exists) { ccnet_warning ("Failed to remove group [%d] whose child group must be removed first.\n", group_id); g_string_free (sql, TRUE); return -1; } } if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) g_string_printf (sql, "DELETE FROM \"%s\" WHERE group_id=?", table_name); else g_string_printf (sql, "DELETE FROM `%s` WHERE group_id=?", table_name); seaf_db_statement_query (db, sql->str, 1, "int", group_id); g_string_printf (sql, "DELETE FROM GroupUser WHERE group_id=?"); seaf_db_statement_query (db, sql->str, 1, "int", group_id); g_string_printf (sql, "DELETE FROM GroupStructure WHERE group_id=?"); seaf_db_statement_query (db, sql->str, 1, "int", group_id); g_string_free (sql, TRUE); return 0; } static gboolean check_group_exists (CcnetGroupManager *mgr, CcnetDB *db, int group_id) { GString *sql = g_string_new (""); const char *table_name = mgr->priv->table_name; gboolean exists, err; if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) { g_string_printf (sql, "SELECT group_id FROM \"%s\" WHERE group_id=?", table_name); exists = seaf_db_statement_exists (db, sql->str, &err, 1, "int", group_id); } else { g_string_printf (sql, "SELECT group_id FROM `%s` WHERE group_id=?", table_name); exists = seaf_db_statement_exists (db, sql->str, &err, 1, "int", group_id); } g_string_free (sql, TRUE); if (err) { ccnet_warning ("DB error when check group exist.\n"); return FALSE; } return exists; } int ccnet_group_manager_add_member (CcnetGroupManager *mgr, int group_id, const char *user_name, const char *member_name, GError **error) { CcnetDB *db = mgr->priv->db; /* 
check whether group exists */ if (!check_group_exists (mgr, db, group_id)) { g_set_error (error, CCNET_DOMAIN, 0, "Group not exists"); return -1; } char *member_name_l = g_ascii_strdown (member_name, -1); int rc = seaf_db_statement_query (db, "INSERT INTO GroupUser (group_id, user_name, is_staff) VALUES (?, ?, ?)", 3, "int", group_id, "string", member_name_l, "int", 0); g_free (member_name_l); if (rc < 0) { g_set_error (error, CCNET_DOMAIN, 0, "Failed to add member to group"); return -1; } return 0; } int ccnet_group_manager_remove_member (CcnetGroupManager *mgr, int group_id, const char *user_name, const char *member_name, GError **error) { CcnetDB *db = mgr->priv->db; char *sql; /* check whether group exists */ if (!check_group_exists (mgr, db, group_id)) { g_set_error (error, CCNET_DOMAIN, 0, "Group not exists"); return -1; } /* can not remove myself */ if (g_strcmp0 (user_name, member_name) == 0) { g_set_error (error, CCNET_DOMAIN, 0, "Can not remove myself"); return -1; } sql = "DELETE FROM GroupUser WHERE group_id=? AND user_name=?"; seaf_db_statement_query (db, sql, 2, "int", group_id, "string", member_name); return 0; } int ccnet_group_manager_set_admin (CcnetGroupManager *mgr, int group_id, const char *member_name, GError **error) { CcnetDB *db = mgr->priv->db; seaf_db_statement_query (db, "UPDATE GroupUser SET is_staff = 1 " "WHERE group_id = ? and user_name = ?", 2, "int", group_id, "string", member_name); return 0; } int ccnet_group_manager_unset_admin (CcnetGroupManager *mgr, int group_id, const char *member_name, GError **error) { CcnetDB *db = mgr->priv->db; seaf_db_statement_query (db, "UPDATE GroupUser SET is_staff = 0 " "WHERE group_id = ? 
and user_name = ?", 2, "int", group_id, "string", member_name); return 0; } int ccnet_group_manager_set_group_name (CcnetGroupManager *mgr, int group_id, const char *group_name, GError **error) { const char *table_name = mgr->priv->table_name; GString *sql = g_string_new (""); CcnetDB *db = mgr->priv->db; if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) { g_string_printf (sql, "UPDATE \"%s\" SET group_name = ? " "WHERE group_id = ?", table_name); seaf_db_statement_query (db, sql->str, 2, "string", group_name, "int", group_id); } else { g_string_printf (sql, "UPDATE `%s` SET group_name = ? " "WHERE group_id = ?", table_name); seaf_db_statement_query (db, sql->str, 2, "string", group_name, "int", group_id); } g_string_free (sql, TRUE); return 0; } int ccnet_group_manager_quit_group (CcnetGroupManager *mgr, int group_id, const char *user_name, GError **error) { CcnetDB *db = mgr->priv->db; /* check whether group exists */ if (!check_group_exists (mgr, db, group_id)) { g_set_error (error, CCNET_DOMAIN, 0, "Group not exists"); return -1; } seaf_db_statement_query (db, "DELETE FROM GroupUser WHERE group_id=? 
" "AND user_name=?", 2, "int", group_id, "string", user_name); return 0; } static gboolean get_user_groups_cb (CcnetDBRow *row, void *data) { GList **plist = data; CcnetGroup *group; int group_id = seaf_db_row_get_column_int (row, 0); const char *group_name = seaf_db_row_get_column_text (row, 1); const char *creator_name = seaf_db_row_get_column_text (row, 2); gint64 ts = seaf_db_row_get_column_int64 (row, 3); int parent_group_id = seaf_db_row_get_column_int (row, 4); group = g_object_new (CCNET_TYPE_GROUP, "id", group_id, "group_name", group_name, "creator_name", creator_name, "timestamp", ts, "source", "DB", "parent_group_id", parent_group_id, NULL); *plist = g_list_append (*plist, group); return TRUE; } GList * ccnet_group_manager_get_ancestor_groups (CcnetGroupManager *mgr, int group_id) { CcnetDB *db = mgr->priv->db; GList *ret = NULL; CcnetGroup *group = NULL; GString *sql = g_string_new (""); const char *table_name = mgr->priv->table_name; g_string_printf (sql, "SELECT path FROM GroupStructure WHERE group_id=?"); char *path = seaf_db_statement_get_string (db, sql->str, 1, "int", group_id); if (path) { if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) g_string_printf (sql, "SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM " "\"%s\" g WHERE g.group_id IN(%s) " "ORDER BY g.group_id", table_name, path); else g_string_printf (sql, "SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM " "`%s` g WHERE g.group_id IN(%s) " "ORDER BY g.group_id", table_name, path); if (seaf_db_statement_foreach_row (db, sql->str, get_user_groups_cb, &ret, 0) < 0) { ccnet_warning ("Failed to get ancestor groups of group %d\n", group_id); g_string_free (sql, TRUE); g_free (path); return NULL; } g_string_free (sql, TRUE); g_free (path); } else { // group is not in structure, return itself. 
group = ccnet_group_manager_get_group (mgr, group_id, NULL); if (group) { ret = g_list_prepend (ret, group); } } return ret; } static gint group_comp_func (gconstpointer a, gconstpointer b) { CcnetGroup *g1 = (CcnetGroup *)a; CcnetGroup *g2 = (CcnetGroup *)b; int id_1 = 0, id_2 = 0; g_object_get (g1, "id", &id_1, NULL); g_object_get (g2, "id", &id_2, NULL); if (id_1 == id_2) return 0; return id_1 > id_2 ? -1 : 1; } gboolean get_group_paths_cb (CcnetDBRow *row, void *data) { GString *paths = data; const char *path = seaf_db_row_get_column_text (row, 0); if (g_strcmp0 (paths->str, "") == 0) g_string_append_printf (paths, "%s", path); else g_string_append_printf (paths, ", %s", path); return TRUE; } GList * ccnet_group_manager_get_groups_by_user (CcnetGroupManager *mgr, const char *user_name, gboolean return_ancestors, GError **error) { CcnetDB *db = mgr->priv->db; GList *groups = NULL, *ret = NULL; GList *ptr; GString *sql = g_string_new (""); const char *table_name = mgr->priv->table_name; CcnetGroup *group; int parent_group_id = 0, group_id = 0; if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) g_string_printf (sql, "SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM " "\"%s\" g, GroupUser u WHERE g.group_id = u.group_id AND user_name=? ORDER BY g.group_id DESC", table_name); else g_string_printf (sql, "SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM " "`%s` g, GroupUser u WHERE g.group_id = u.group_id AND user_name=? 
ORDER BY g.group_id DESC", table_name); if (seaf_db_statement_foreach_row (db, sql->str, get_user_groups_cb, &groups, 1, "string", user_name) < 0) { g_string_free (sql, TRUE); return NULL; } if (!return_ancestors) { g_string_free (sql, TRUE); return groups; } /* Get ancestor groups in descending order by group_id.*/ GString *paths = g_string_new (""); g_string_erase (sql, 0, -1); for (ptr = groups; ptr; ptr = ptr->next) { group = ptr->data; g_object_get (group, "parent_group_id", &parent_group_id, NULL); g_object_get (group, "id", &group_id, NULL); if (parent_group_id != 0) { if (g_strcmp0(sql->str, "") == 0) g_string_append_printf (sql, "SELECT path FROM GroupStructure WHERE group_id IN (%d", group_id); else g_string_append_printf (sql, ", %d", group_id); } else { g_object_ref (group); ret = g_list_insert_sorted (ret, group, group_comp_func); } } if (g_strcmp0(sql->str, "") != 0) { g_string_append_printf (sql, ")"); if (seaf_db_statement_foreach_row (db, sql->str, get_group_paths_cb, paths, 0) < 0) { g_list_free_full (ret, g_object_unref); ret = NULL; goto out; } if (g_strcmp0(paths->str, "") == 0) { ccnet_warning ("Failed to get groups path for user %s\n", user_name); g_list_free_full (ret, g_object_unref); ret = NULL; goto out; } g_string_printf (sql, "SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM " "`%s` g WHERE g.group_id IN (%s) ORDER BY g.group_id DESC", table_name, paths->str); if (seaf_db_statement_foreach_row (db, sql->str, get_user_groups_cb, &ret, 0) < 0) { g_list_free_full (ret, g_object_unref); ret = NULL; goto out; } } ret = g_list_sort (ret, group_comp_func); out: g_string_free (sql, TRUE); g_list_free_full (groups, g_object_unref); g_string_free (paths, TRUE); return ret; } static gboolean get_ccnetgroup_cb (CcnetDBRow *row, void *data) { CcnetGroup **p_group = data; int group_id; const char *group_name; const char *creator; int parent_group_id; gint64 ts; group_id = seaf_db_row_get_column_int (row, 0); group_name = 
(const char *)seaf_db_row_get_column_text (row, 1); creator = (const char *)seaf_db_row_get_column_text (row, 2); ts = seaf_db_row_get_column_int64 (row, 3); parent_group_id = seaf_db_row_get_column_int (row, 4); char *creator_l = g_ascii_strdown (creator, -1); *p_group = g_object_new (CCNET_TYPE_GROUP, "id", group_id, "group_name", group_name, "creator_name", creator_l, "timestamp", ts, "source", "DB", "parent_group_id", parent_group_id, NULL); g_free (creator_l); return FALSE; } GList * ccnet_group_manager_get_child_groups (CcnetGroupManager *mgr, int group_id, GError **error) { CcnetDB *db = mgr->priv->db; GString *sql = g_string_new (""); GList *ret = NULL; const char *table_name = mgr->priv->table_name; if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) g_string_printf (sql, "SELECT group_id, group_name, creator_name, timestamp, parent_group_id FROM " "\"%s\" WHERE parent_group_id=?", table_name); else g_string_printf (sql, "SELECT group_id, group_name, creator_name, timestamp, parent_group_id FROM " "`%s` WHERE parent_group_id=?", table_name); if (seaf_db_statement_foreach_row (db, sql->str, get_user_groups_cb, &ret, 1, "int", group_id) < 0) { g_string_free (sql, TRUE); return NULL; } g_string_free (sql, TRUE); return ret; } GList * ccnet_group_manager_get_descendants_groups(CcnetGroupManager *mgr, int group_id, GError **error) { GList *ret = NULL; CcnetDB *db = mgr->priv->db; const char *table_name = mgr->priv->table_name; GString *sql = g_string_new(""); if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) g_string_printf (sql, "SELECT g.group_id, group_name, creator_name, timestamp, " "parent_group_id FROM \"%s\" g, GroupStructure s " "WHERE g.group_id=s.group_id " "AND (s.path LIKE '%d, %%' OR s.path LIKE '%%, %d, %%' " "OR g.group_id=?)", table_name, group_id, group_id); else g_string_printf (sql, "SELECT g.group_id, group_name, creator_name, timestamp, " "parent_group_id FROM `%s` g, GroupStructure s " "WHERE g.group_id=s.group_id " "AND (s.path LIKE '%d, %%' OR s.path 
LIKE '%%, %d, %%' " "OR g.group_id=?)", table_name, group_id, group_id); if (seaf_db_statement_foreach_row (db, sql->str, get_user_groups_cb, &ret, 1, "int", group_id) < 0) { g_string_free (sql, TRUE); return NULL; } g_string_free (sql, TRUE); return ret; } CcnetGroup * ccnet_group_manager_get_group (CcnetGroupManager *mgr, int group_id, GError **error) { CcnetDB *db = mgr->priv->db; GString *sql = g_string_new (""); CcnetGroup *ccnetgroup = NULL; const char *table_name = mgr->priv->table_name; if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) g_string_printf (sql, "SELECT group_id, group_name, creator_name, timestamp, parent_group_id FROM " "\"%s\" WHERE group_id = ?", table_name); else g_string_printf (sql, "SELECT group_id, group_name, creator_name, timestamp, parent_group_id FROM " "`%s` WHERE group_id = ?", table_name); if (seaf_db_statement_foreach_row (db, sql->str, get_ccnetgroup_cb, &ccnetgroup, 1, "int", group_id) < 0) { g_string_free (sql, TRUE); return NULL; } g_string_free (sql, TRUE); return ccnetgroup; } static gboolean get_ccnet_groupuser_cb (CcnetDBRow *row, void *data) { GList **plist = data; CcnetGroupUser *group_user; int group_id = seaf_db_row_get_column_int (row, 0); const char *user = (const char *)seaf_db_row_get_column_text (row, 1); int is_staff = seaf_db_row_get_column_int (row, 2); char *user_l = g_ascii_strdown (user, -1); group_user = g_object_new (CCNET_TYPE_GROUP_USER, "group_id", group_id, "user_name", user_l, "is_staff", is_staff, NULL); g_free (user_l); if (group_user != NULL) { *plist = g_list_prepend (*plist, group_user); } return TRUE; } GList * ccnet_group_manager_get_group_members (CcnetGroupManager *mgr, int group_id, int start, int limit, GError **error) { CcnetDB *db = mgr->priv->db; char *sql; GList *group_users = NULL; int rc; if (limit == -1) { sql = "SELECT group_id, user_name, is_staff FROM GroupUser WHERE group_id = ?"; rc =seaf_db_statement_foreach_row (db, sql, get_ccnet_groupuser_cb, &group_users, 1, "int", group_id); } 
else { sql = "SELECT group_id, user_name, is_staff FROM GroupUser WHERE group_id = ? LIMIT ? OFFSET ?"; rc = seaf_db_statement_foreach_row (db, sql, get_ccnet_groupuser_cb, &group_users, 3, "int", group_id, "int", limit, "int", start); } if (rc < 0) { return NULL; } return g_list_reverse (group_users); } GList * ccnet_group_manager_get_members_with_prefix (CcnetGroupManager *mgr, int group_id, const char *prefix, GError **error) { CcnetDB *db = mgr->priv->db; GList *group_users = NULL; GList *ptr; CcnetGroup *group; GString *sql = g_string_new (""); int id; g_string_printf(sql, "SELECT group_id, user_name, is_staff FROM GroupUser " "WHERE group_id IN ("); GList *groups = ccnet_group_manager_get_descendants_groups(mgr, group_id, NULL); if (!groups) g_string_append_printf(sql, "%d", group_id); for (ptr = groups; ptr; ptr = ptr->next) { group = ptr->data; g_object_get(group, "id", &id, NULL); g_string_append_printf(sql, "%d", id); if (ptr->next) g_string_append_printf(sql, ", "); } g_string_append_printf(sql, ")"); if (prefix) g_string_append_printf(sql, " AND user_name LIKE '%s%%'", prefix); g_list_free_full (groups, g_object_unref); if (seaf_db_statement_foreach_row (db, sql->str, get_ccnet_groupuser_cb, &group_users, 0) < 0) { g_string_free(sql, TRUE); return NULL; } g_string_free(sql, TRUE); return group_users; } int ccnet_group_manager_check_group_staff (CcnetGroupManager *mgr, int group_id, const char *user_name, gboolean in_structure) { return check_group_staff (mgr->priv->db, group_id, user_name, in_structure); } int ccnet_group_manager_remove_group_user (CcnetGroupManager *mgr, const char *user) { CcnetDB *db = mgr->priv->db; seaf_db_statement_query (db, "DELETE FROM GroupUser " "WHERE user_name = ?", 1, "string", user); return 0; } int ccnet_group_manager_is_group_user (CcnetGroupManager *mgr, int group_id, const char *user, gboolean in_structure) { CcnetDB *db = mgr->priv->db; gboolean exists, err; exists = seaf_db_statement_exists (db, "SELECT group_id 
FROM GroupUser " "WHERE group_id=? AND user_name=?", &err, 2, "int", group_id, "string", user); if (err) { ccnet_warning ("DB error when check user exist in GroupUser.\n"); return 0; } if (!in_structure || exists) return exists ? 1 : 0; GList *ptr; GList *groups = ccnet_group_manager_get_groups_by_user (mgr, user, TRUE, NULL); if (!groups) return 0; CcnetGroup *group; int id; for (ptr = groups; ptr; ptr = ptr->next) { group = ptr->data; g_object_get (group, "id", &id, NULL); if (group_id == id) { exists = TRUE; break; } } g_list_free_full (groups, g_object_unref); return exists ? 1 : 0; } static gboolean get_all_ccnetgroups_cb (CcnetDBRow *row, void *data) { GList **plist = data; int group_id; const char *group_name; const char *creator; gint64 ts; int parent_group_id; group_id = seaf_db_row_get_column_int (row, 0); group_name = (const char *)seaf_db_row_get_column_text (row, 1); creator = (const char *)seaf_db_row_get_column_text (row, 2); ts = seaf_db_row_get_column_int64 (row, 3); parent_group_id = seaf_db_row_get_column_int (row, 4); char *creator_l = g_ascii_strdown (creator, -1); CcnetGroup *group = g_object_new (CCNET_TYPE_GROUP, "id", group_id, "group_name", group_name, "creator_name", creator_l, "timestamp", ts, "source", "DB", "parent_group_id", parent_group_id, NULL); g_free (creator_l); *plist = g_list_prepend (*plist, group); return TRUE; } GList * ccnet_group_manager_get_top_groups (CcnetGroupManager *mgr, gboolean including_org, GError **error) { CcnetDB *db = mgr->priv->db; GList *ret = NULL; GString *sql = g_string_new (""); const char *table_name = mgr->priv->table_name; int rc; if (seaf_db_type(mgr->priv->db) == SEAF_DB_TYPE_PGSQL) { if (including_org) g_string_printf (sql, "SELECT group_id, group_name, " "creator_name, timestamp, parent_group_id FROM \"%s\" " "WHERE parent_group_id=-1 ORDER BY timestamp DESC", table_name); else g_string_printf (sql, "SELECT g.group_id, g.group_name, " "g.creator_name, g.timestamp, g.parent_group_id FROM \"%s\" g 
" "LEFT JOIN OrgGroup o ON g.group_id = o.group_id " "WHERE g.parent_group_id=-1 AND o.group_id is NULL " "ORDER BY timestamp DESC", table_name); } else { if (including_org) g_string_printf (sql, "SELECT group_id, group_name, " "creator_name, timestamp, parent_group_id FROM `%s` " "WHERE parent_group_id=-1 ORDER BY timestamp DESC", table_name); else g_string_printf (sql, "SELECT g.group_id, g.group_name, " "g.creator_name, g.timestamp, g.parent_group_id FROM `%s` g " "LEFT JOIN OrgGroup o ON g.group_id = o.group_id " "WHERE g.parent_group_id=-1 AND o.group_id is NULL " "ORDER BY timestamp DESC", table_name); } rc = seaf_db_statement_foreach_row (db, sql->str, get_all_ccnetgroups_cb, &ret, 0); g_string_free (sql, TRUE); if (rc < 0) return NULL; return g_list_reverse (ret); } GList* ccnet_group_manager_list_all_departments (CcnetGroupManager *mgr, GError **error) { CcnetDB *db = mgr->priv->db; GList *ret = NULL; GString *sql = g_string_new (""); const char *table_name = mgr->priv->table_name; int rc; int db_type = seaf_db_type(db); if (db_type == SEAF_DB_TYPE_PGSQL) { g_string_printf (sql, "SELECT group_id, group_name, " "creator_name, timestamp, type, " "parent_group_id FROM \"%s\" " "WHERE parent_group_id = -1 OR parent_group_id > 0 " "ORDER BY group_id", table_name); rc = seaf_db_statement_foreach_row (db, sql->str, get_all_ccnetgroups_cb, &ret, 0); } else { g_string_printf (sql, "SELECT `group_id`, `group_name`, " "`creator_name`, `timestamp`, `type`, `parent_group_id` FROM `%s` " "WHERE parent_group_id = -1 OR parent_group_id > 0 " "ORDER BY group_id", table_name); rc = seaf_db_statement_foreach_row (db, sql->str, get_all_ccnetgroups_cb, &ret, 0); } g_string_free (sql, TRUE); if (rc < 0) return NULL; return g_list_reverse (ret); } GList* ccnet_group_manager_get_all_groups (CcnetGroupManager *mgr, int start, int limit, GError **error) { CcnetDB *db = mgr->priv->db; GList *ret = NULL; GString *sql = g_string_new (""); const char *table_name = 
mgr->priv->table_name; int rc; if (seaf_db_type(mgr->priv->db) == SEAF_DB_TYPE_PGSQL) { if (start == -1 && limit == -1) { g_string_printf (sql, "SELECT group_id, group_name, " "creator_name, timestamp, parent_group_id FROM \"%s\" " "ORDER BY timestamp DESC", table_name); rc = seaf_db_statement_foreach_row (db, sql->str, get_all_ccnetgroups_cb, &ret, 0); } else { g_string_printf (sql, "SELECT group_id, group_name, " "creator_name, timestamp, parent_group_id FROM \"%s\" " "ORDER BY timestamp DESC LIMIT ? OFFSET ?", table_name); rc = seaf_db_statement_foreach_row (db, sql->str, get_all_ccnetgroups_cb, &ret, 2, "int", limit, "int", start); } } else { if (start == -1 && limit == -1) { g_string_printf (sql, "SELECT `group_id`, `group_name`, " "`creator_name`, `timestamp`, `parent_group_id` FROM `%s` " "ORDER BY timestamp DESC", table_name); rc = seaf_db_statement_foreach_row (db, sql->str, get_all_ccnetgroups_cb, &ret, 0); } else { g_string_printf (sql, "SELECT `group_id`, `group_name`, " "`creator_name`, `timestamp`, `parent_group_id` FROM `%s` " "ORDER BY timestamp DESC LIMIT ? OFFSET ?", table_name); rc = seaf_db_statement_foreach_row (db, sql->str, get_all_ccnetgroups_cb, &ret, 2, "int", limit, "int", start); } } g_string_free (sql, TRUE); if (rc < 0) return NULL; return g_list_reverse (ret); } int ccnet_group_manager_set_group_creator (CcnetGroupManager *mgr, int group_id, const char *user_name) { CcnetDB *db = mgr->priv->db; const char *table_name = mgr->priv->table_name; GString *sql = g_string_new (""); if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) { g_string_printf (sql, "UPDATE \"%s\" SET creator_name = ? WHERE group_id = ?", table_name); } else { g_string_printf (sql, "UPDATE `%s` SET creator_name = ? 
WHERE group_id = ?", table_name); } seaf_db_statement_query (db, sql->str, 2, "string", user_name, "int", group_id); g_string_free (sql, TRUE); return 0; } GList * ccnet_group_manager_search_groups (CcnetGroupManager *mgr, const char *keyword, int start, int limit) { CcnetDB *db = mgr->priv->db; GList *ret = NULL; GString *sql = g_string_new (""); const char *table_name = mgr->priv->table_name; int rc; char *db_patt = g_strdup_printf ("%%%s%%", keyword); if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) { if (start == -1 && limit == -1) { g_string_printf (sql, "SELECT group_id, group_name, " "creator_name, timestamp, parent_group_id " "FROM \"%s\" WHERE group_name LIKE ?", table_name); rc = seaf_db_statement_foreach_row (db, sql->str, get_all_ccnetgroups_cb, &ret, 1, "string", db_patt); } else { g_string_printf (sql, "SELECT group_id, group_name, " "creator_name, timestamp, parent_group_id " "FROM \"%s\" WHERE group_name LIKE ? " "LIMIT ? OFFSET ?", table_name); rc = seaf_db_statement_foreach_row (db, sql->str, get_all_ccnetgroups_cb, &ret, 3, "string", db_patt, "int", limit, "int", start); } } else { if (start == -1 && limit == -1) { g_string_printf (sql, "SELECT group_id, group_name, " "creator_name, timestamp, parent_group_id " "FROM `%s` WHERE group_name LIKE ?", table_name); rc = seaf_db_statement_foreach_row (db, sql->str, get_all_ccnetgroups_cb, &ret, 1, "string", db_patt); } else { g_string_printf (sql, "SELECT group_id, group_name, " "creator_name, timestamp, parent_group_id " "FROM `%s` WHERE group_name LIKE ? " "LIMIT ? 
OFFSET ?", table_name); rc = seaf_db_statement_foreach_row (db, sql->str, get_all_ccnetgroups_cb, &ret, 3, "string", db_patt, "int", limit, "int", start); } } g_free (db_patt); g_string_free (sql, TRUE); if (rc < 0) { while (ret != NULL) { g_object_unref (ret->data); ret = g_list_delete_link (ret, ret); } return NULL; } return g_list_reverse (ret); } static gboolean get_groups_members_cb (CcnetDBRow *row, void *data) { GList **users = data; const char *user = seaf_db_row_get_column_text (row, 0); char *user_l = g_ascii_strdown (user, -1); CcnetGroupUser *group_user = g_object_new (CCNET_TYPE_GROUP_USER, "user_name", user_l, NULL); g_free (user_l); *users = g_list_append(*users, group_user); return TRUE; } /* group_ids is json format: "[id1, id2, id3, ...]" */ GList * ccnet_group_manager_get_groups_members (CcnetGroupManager *mgr, const char *group_ids, GError **error) { CcnetDB *db = mgr->priv->db; GList *ret = NULL; GString *sql = g_string_new (""); int i, group_id; json_t *j_array = NULL, *j_obj; json_error_t j_error; g_string_printf (sql, "SELECT DISTINCT user_name FROM GroupUser WHERE group_id IN ("); j_array = json_loadb (group_ids, strlen(group_ids), 0, &j_error); if (!j_array) { g_set_error (error, CCNET_DOMAIN, 0, "Bad args."); g_string_free (sql, TRUE); return NULL; } size_t id_num = json_array_size (j_array); for (i = 0; i < id_num; i++) { j_obj = json_array_get (j_array, i); group_id = json_integer_value (j_obj); if (group_id <= 0) { g_set_error (error, CCNET_DOMAIN, 0, "Bad args."); g_string_free (sql, TRUE); json_decref (j_array); return NULL; } g_string_append_printf (sql, "%d", group_id); if (i + 1 < id_num) g_string_append_printf (sql, ","); } g_string_append_printf (sql, ")"); json_decref (j_array); if (seaf_db_statement_foreach_row (db, sql->str, get_groups_members_cb, &ret, 0) < 0) ccnet_warning("Failed to get groups members for group [%s].\n", group_ids); g_string_free (sql, TRUE); return ret; } GList* ccnet_group_manager_search_group_members 
(CcnetGroupManager *mgr, int group_id, const char *pattern) { CcnetDB *db = mgr->priv->db; GList *ret = NULL; char *sql; int rc; char *db_patt = g_strdup_printf ("%%%s%%", pattern); sql = "SELECT DISTINCT user_name FROM GroupUser " "WHERE group_id = ? AND user_name LIKE ? ORDER BY user_name"; rc = seaf_db_statement_foreach_row (db, sql, get_groups_members_cb, &ret, 2, "int", group_id, "string", db_patt); g_free (db_patt); if (rc < 0) { g_list_free_full (ret, g_object_unref); return NULL; } return g_list_reverse (ret); } int ccnet_group_manager_update_group_user (CcnetGroupManager *mgr, const char *old_email, const char *new_email) { int rc; CcnetDB *db = mgr->priv->db; rc = seaf_db_statement_query (db, "UPDATE GroupUser SET user_name=? " "WHERE user_name = ?", 2, "string", new_email, "string", old_email); if (rc < 0){ return -1; } return 0; } ================================================ FILE: common/group-mgr.h ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef GROUP_MGR_H #define GROUP_MGR_H /* #define MAX_GROUP_MEMBERS 16 */ typedef struct _SeafileSession SeafileSession; typedef struct _CcnetGroupManager CcnetGroupManager; typedef struct _CcnetGroupManagerPriv CcnetGroupManagerPriv; struct _CcnetGroupManager { SeafileSession *session; CcnetGroupManagerPriv *priv; }; CcnetGroupManager* ccnet_group_manager_new (SeafileSession *session); int ccnet_group_manager_prepare (CcnetGroupManager *manager); void ccnet_group_manager_start (CcnetGroupManager *manager); int ccnet_group_manager_create_group (CcnetGroupManager *mgr, const char *group_name, const char *user_name, int parent_group_id, GError **error); int ccnet_group_manager_create_org_group (CcnetGroupManager *mgr, int org_id, const char *group_name, const char *user_name, int parent_group_id, GError **error); int ccnet_group_manager_remove_group (CcnetGroupManager *mgr, int group_id, gboolean remove_anyway, GError **error); 
int ccnet_group_manager_add_member (CcnetGroupManager *mgr, int group_id, const char *user_name, const char *member_name, GError **error); int ccnet_group_manager_remove_member (CcnetGroupManager *mgr, int group_id, const char *user_name, const char *member_name, GError **error); int ccnet_group_manager_set_admin (CcnetGroupManager *mgr, int group_id, const char *member_name, GError **error); int ccnet_group_manager_unset_admin (CcnetGroupManager *mgr, int group_id, const char *member_name, GError **error); int ccnet_group_manager_set_group_name (CcnetGroupManager *mgr, int group_id, const char *group_name, GError **error); int ccnet_group_manager_quit_group (CcnetGroupManager *mgr, int group_id, const char *user_name, GError **error); GList * ccnet_group_manager_get_groups_by_user (CcnetGroupManager *mgr, const char *user_name, gboolean return_ancestors, GError **error); CcnetGroup * ccnet_group_manager_get_group (CcnetGroupManager *mgr, int group_id, GError **error); GList * ccnet_group_manager_get_group_members (CcnetGroupManager *mgr, int group_id, int start, int limit, GError **error); GList * ccnet_group_manager_get_members_with_prefix (CcnetGroupManager *mgr, int group_id, const char *prefix, GError **error); int ccnet_group_manager_check_group_staff (CcnetGroupManager *mgr, int group_id, const char *user_name, int in_structure); int ccnet_group_manager_remove_group_user (CcnetGroupManager *mgr, const char *user); int ccnet_group_manager_is_group_user (CcnetGroupManager *mgr, int group_id, const char *user, gboolean in_structure); GList* ccnet_group_manager_list_all_departments (CcnetGroupManager *mgr, GError **error); GList* ccnet_group_manager_get_all_groups (CcnetGroupManager *mgr, int start, int limit, GError **error); int ccnet_group_manager_set_group_creator (CcnetGroupManager *mgr, int group_id, const char *user_name); GList* ccnet_group_manager_search_groups (CcnetGroupManager *mgr, const char *keyword, int start, int limit); GList* 
ccnet_group_manager_search_group_members (CcnetGroupManager *mgr, int group_id, const char *pattern); GList * ccnet_group_manager_get_top_groups (CcnetGroupManager *mgr, gboolean including_org, GError **error); GList * ccnet_group_manager_get_child_groups (CcnetGroupManager *mgr, int group_id, GError **error); GList * ccnet_group_manager_get_descendants_groups (CcnetGroupManager *mgr, int group_id, GError **error); GList * ccnet_group_manager_get_ancestor_groups (CcnetGroupManager *mgr, int group_id); GList * ccnet_group_manager_get_groups_members (CcnetGroupManager *mgr, const char *group_ids, GError **error); int ccnet_group_manager_update_group_user (CcnetGroupManager *mgr, const char *old_email, const char *new_email); #endif /* GROUP_MGR_H */ ================================================ FILE: common/log.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include "common.h" #include #include #ifndef WIN32 #ifdef SEAFILE_SERVER #include #endif #endif #include "log.h" #include "utils.h" /* message with greater log levels will be ignored */ static int ccnet_log_level; static int seafile_log_level; static char *logfile; static FILE *logfp; static gboolean log_to_stdout = FALSE; static char *app_name; #ifndef WIN32 #ifdef SEAFILE_SERVER static gboolean enable_syslog; #endif #endif #ifndef WIN32 #ifdef SEAFILE_SERVER static int get_syslog_level (GLogLevelFlags level) { switch (level) { case G_LOG_LEVEL_DEBUG: return LOG_DEBUG; case G_LOG_LEVEL_INFO: return LOG_INFO; case G_LOG_LEVEL_WARNING: return LOG_WARNING; case G_LOG_LEVEL_ERROR: return LOG_ERR; case G_LOG_LEVEL_CRITICAL: return LOG_ERR; default: return LOG_DEBUG; } } #endif #endif static void seafile_log (const gchar *log_domain, GLogLevelFlags log_level, const gchar *message, gpointer user_data) { time_t t; struct tm *tm; char buf[1024]; int len; if (log_level > seafile_log_level) return; if (log_to_stdout) { char name_buf[32] = 
{0}; snprintf(name_buf, sizeof(name_buf), "[%s] ", app_name); fputs (name_buf, logfp); } t = time(NULL); tm = localtime(&t); len = strftime (buf, 1024, "[%Y-%m-%d %H:%M:%S] ", tm); g_return_if_fail (len < 1024); if (logfp) { fputs (buf, logfp); if (log_level == G_LOG_LEVEL_DEBUG) fputs ("[DEBUG] ", logfp); else if (log_level == G_LOG_LEVEL_WARNING) fputs ("[WARNING] ", logfp); else if (log_level == G_LOG_LEVEL_CRITICAL) fputs ("[ERROR] ", logfp); else fputs ("[INFO] ", logfp); fputs (message, logfp); fflush (logfp); } #ifndef WIN32 #ifdef SEAFILE_SERVER if (enable_syslog) syslog (get_syslog_level (log_level), "%s", message); #endif #endif } static void ccnet_log (const gchar *log_domain, GLogLevelFlags log_level, const gchar *message, gpointer user_data) { time_t t; struct tm *tm; char buf[1024]; int len; if (log_level > ccnet_log_level) return; t = time(NULL); tm = localtime(&t); len = strftime (buf, 1024, "[%x %X] ", tm); g_return_if_fail (len < 1024); if (logfp) { fputs (buf, logfp); if (log_level == G_LOG_LEVEL_DEBUG) fputs ("[DEBUG] ", logfp); else if (log_level == G_LOG_LEVEL_WARNING) fputs ("[WARNING] ", logfp); else if (log_level == G_LOG_LEVEL_CRITICAL) fputs ("[ERROR] ", logfp); else fputs ("[INFO] ", logfp); fputs (message, logfp); fflush (logfp); } #ifndef WIN32 #ifdef SEAFILE_SERVER if (enable_syslog) syslog (get_syslog_level (log_level), "%s", message); #endif #endif } static int get_debug_level(const char *str, int default_level) { if (strcmp(str, "debug") == 0) return G_LOG_LEVEL_DEBUG; if (strcmp(str, "info") == 0) return G_LOG_LEVEL_INFO; if (strcmp(str, "warning") == 0) return G_LOG_LEVEL_WARNING; return default_level; } int seafile_log_init (const char *_logfile, const char *ccnet_debug_level_str, const char *seafile_debug_level_str, const char *_app_name) { g_log_set_handler (NULL, G_LOG_LEVEL_MASK | G_LOG_FLAG_FATAL | G_LOG_FLAG_RECURSION, seafile_log, NULL); g_log_set_handler ("Ccnet", G_LOG_LEVEL_MASK | G_LOG_FLAG_FATAL | 
G_LOG_FLAG_RECURSION, ccnet_log, NULL); /* record all log message */ ccnet_log_level = get_debug_level(ccnet_debug_level_str, G_LOG_LEVEL_INFO); seafile_log_level = get_debug_level(seafile_debug_level_str, G_LOG_LEVEL_DEBUG); app_name = g_strdup (_app_name); const char *log_to_stdout_env = g_getenv("SEAFILE_LOG_TO_STDOUT"); if (g_strcmp0(log_to_stdout_env, "true") == 0) { logfp = stdout; logfile = g_strdup (_logfile); log_to_stdout = TRUE; } else if (g_strcmp0(_logfile, "-") == 0) { logfp = stdout; logfile = g_strdup (_logfile); } else { logfile = ccnet_expand_path(_logfile); if ((logfp = g_fopen (logfile, "a+")) == NULL) { seaf_message ("Failed to open file %s\n", logfile); return -1; } } return 0; } int seafile_log_reopen () { FILE *fp, *oldfp; if (g_strcmp0(logfile, "-") == 0 || log_to_stdout) return 0; if ((fp = g_fopen (logfile, "a+")) == NULL) { seaf_message ("Failed to open file %s\n", logfile); return -1; } //TODO: check file's health oldfp = logfp; logfp = fp; if (fclose(oldfp) < 0) { seaf_message ("Failed to close file %s\n", logfile); return -1; } return 0; } static SeafileDebugFlags debug_flags = 0; static GDebugKey debug_keys[] = { { "Transfer", SEAFILE_DEBUG_TRANSFER }, { "Sync", SEAFILE_DEBUG_SYNC }, { "Watch", SEAFILE_DEBUG_WATCH }, { "Http", SEAFILE_DEBUG_HTTP }, { "Merge", SEAFILE_DEBUG_MERGE }, { "Other", SEAFILE_DEBUG_OTHER }, }; gboolean seafile_debug_flag_is_set (SeafileDebugFlags flag) { return (debug_flags & flag) != 0; } void seafile_debug_set_flags (SeafileDebugFlags flags) { g_message ("Set debug flags %#x\n", flags); debug_flags |= flags; } void seafile_debug_set_flags_string (const gchar *flags_string) { guint nkeys = G_N_ELEMENTS (debug_keys); if (flags_string) seafile_debug_set_flags ( g_parse_debug_string (flags_string, debug_keys, nkeys)); } void seafile_debug_impl (SeafileDebugFlags flag, const gchar *format, ...) 
{ if (flag & debug_flags) { va_list args; va_start (args, format); g_logv (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, format, args); va_end (args); } } #ifndef WIN32 #ifdef SEAFILE_SERVER void set_syslog_config (GKeyFile *config) { enable_syslog = g_key_file_get_boolean (config, "general", "enable_syslog", NULL); if (enable_syslog) openlog (NULL, LOG_NDELAY | LOG_PID, LOG_USER); } #endif #endif ================================================ FILE: common/log.h ================================================ #ifndef LOG_H #define LOG_H #define SEAFILE_DOMAIN g_quark_from_string("seafile") #ifndef seaf_warning #define seaf_warning(fmt, ...) g_warning("%s(%d): " fmt, __FILE__, __LINE__, ##__VA_ARGS__) #endif #ifndef seaf_message #define seaf_message(fmt, ...) g_message("%s(%d): " fmt, __FILE__, __LINE__, ##__VA_ARGS__) #endif #ifndef seaf_error #define seaf_error(fmt, ...) g_critical("%s(%d): " fmt, __FILE__, __LINE__, ##__VA_ARGS__) #endif int seafile_log_init (const char *logfile, const char *ccnet_debug_level_str, const char *seafile_debug_level_str, const char *_app_name); int seafile_log_reopen (); #ifndef WIN32 #ifdef SEAFILE_SERVER void set_syslog_config (GKeyFile *config); #endif #endif void seafile_debug_set_flags_string (const gchar *flags_string); typedef enum { SEAFILE_DEBUG_TRANSFER = 1 << 1, SEAFILE_DEBUG_SYNC = 1 << 2, SEAFILE_DEBUG_WATCH = 1 << 3, /* wt-monitor */ SEAFILE_DEBUG_HTTP = 1 << 4, /* http server */ SEAFILE_DEBUG_MERGE = 1 << 5, SEAFILE_DEBUG_OTHER = 1 << 6, } SeafileDebugFlags; void seafile_debug_impl (SeafileDebugFlags flag, const gchar *format, ...); #ifdef DEBUG_FLAG #undef seaf_debug #define seaf_debug(fmt, ...) 
\ seafile_debug_impl (DEBUG_FLAG, "%.10s(%d): " fmt, __FILE__, __LINE__, ##__VA_ARGS__) #endif /* DEBUG_FLAG */ #endif ================================================ FILE: common/merge-new.c ================================================ #include "common.h" #include "seafile-session.h" #include "merge-new.h" #include "vc-common.h" #define DEBUG_FLAG SEAFILE_DEBUG_MERGE #include "log.h" static int merge_trees_recursive (const char *store_id, int version, int n, SeafDir *trees[], const char *basedir, MergeOptions *opt); static const char * get_nickname_by_modifier (GHashTable *email_to_nickname, const char *modifier) { const char *nickname = NULL; if (!modifier) { return NULL; } nickname = g_hash_table_lookup (email_to_nickname, modifier); if (nickname) { return nickname; } nickname = http_tx_manager_get_nickname (modifier); if (!nickname) { nickname = g_strdup (modifier); } g_hash_table_insert (email_to_nickname, g_strdup(modifier), nickname); return nickname; } static char * merge_conflict_filename (const char *store_id, int version, MergeOptions *opt, const char *basedir, const char *filename) { char *path = NULL, *modifier = NULL, *conflict_name = NULL; const char *nickname = NULL; gint64 mtime; SeafCommit *commit; path = g_strconcat (basedir, filename, NULL); int rc = get_file_modifier_mtime (opt->remote_repo_id, store_id, version, opt->remote_head, path, &modifier, &mtime); if (rc < 0) { commit = seaf_commit_manager_get_commit (seaf->commit_mgr, opt->remote_repo_id, version, opt->remote_head); if (!commit) { seaf_warning ("Failed to find remote head %s:%s.\n", opt->remote_repo_id, opt->remote_head); goto out; } modifier = g_strdup(commit->creator_name); mtime = (gint64)time(NULL); seaf_commit_unref (commit); } nickname = modifier; if (seaf->seahub_pk) nickname = get_nickname_by_modifier (opt->email_to_nickname, modifier); conflict_name = gen_conflict_path (filename, nickname, mtime); out: g_free (path); g_free (modifier); return conflict_name; } static char 
* merge_conflict_dirname (const char *store_id, int version, MergeOptions *opt, const char *basedir, const char *dirname) { char *modifier = NULL, *conflict_name = NULL; const char *nickname = NULL; SeafCommit *commit; commit = seaf_commit_manager_get_commit (seaf->commit_mgr, opt->remote_repo_id, version, opt->remote_head); if (!commit) { seaf_warning ("Failed to find remote head %s:%s.\n", opt->remote_repo_id, opt->remote_head); goto out; } modifier = g_strdup(commit->creator_name); seaf_commit_unref (commit); nickname = modifier; if (seaf->seahub_pk) nickname = get_nickname_by_modifier (opt->email_to_nickname, modifier); conflict_name = gen_conflict_path (dirname, nickname, (gint64)time(NULL)); out: g_free (modifier); return conflict_name; } int twoway_merge(const char *store_id, int version, const char *basedir, SeafDirent *dents[], GList **dents_out, struct MergeOptions *opt) { SeafDirent *files[2]; int i; int n = opt->n_ways; memset (files, 0, sizeof(files[0])*n); for (i = 0; i < n; ++i) { if (dents[i] && S_ISREG(dents[i]->mode)) files[i] = dents[i]; } SeafDirent *head, *remote; char *conflict_name; head = files[0]; remote = files[1]; if (head && remote) { if (strcmp (head->id, remote->id) == 0) { // file match seaf_debug ("%s%s: files match\n", basedir, head->name); *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head)); } else { // file content conflict seaf_debug ("%s%s: files conflict\n", basedir, head->name); conflict_name = merge_conflict_filename(store_id, version, opt, basedir, head->name); if (!conflict_name) return -1; g_free (remote->name); remote->name = conflict_name; remote->name_len = strlen (remote->name); *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head)); *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote)); opt->conflict = TRUE; } } else if (!head && remote) { // file not in head, but in remote seaf_debug ("%s%s: added in remote\n", basedir, remote->name); *dents_out = g_list_prepend (*dents_out, 
seaf_dirent_dup(remote)); } else if (head && !remote) { // file in head, but not in remote seaf_debug ("%s%s: added in head\n", basedir, head->name); *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head)); } return 0; } static int threeway_merge (const char *store_id, int version, SeafDirent *dents[], const char *basedir, GList **dents_out, MergeOptions *opt) { SeafDirent *files[3]; int i; gint64 curr_time; int n = opt->n_ways; memset (files, 0, sizeof(files[0])*n); for (i = 0; i < n; ++i) { if (dents[i] && S_ISREG(dents[i]->mode)) files[i] = dents[i]; } SeafDirent *base, *head, *remote; char *conflict_name; base = files[0]; head = files[1]; remote = files[2]; if (head && remote) { if (strcmp (head->id, remote->id) == 0) { seaf_debug ("%s%s: files match\n", basedir, head->name); *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head)); } else if (base && strcmp (base->id, head->id) == 0) { seaf_debug ("%s%s: unchanged in head, changed in remote\n", basedir, head->name); *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote)); } else if (base && strcmp (base->id, remote->id) == 0) { seaf_debug ("%s%s: unchanged in remote, changed in head\n", basedir, head->name); *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head)); } else { /* File content conflict. */ seaf_debug ("%s%s: files conflict\n", basedir, head->name); conflict_name = merge_conflict_filename(store_id, version, opt, basedir, head->name); if (!conflict_name) return -1; /* Change remote entry name in place. So opt->callback * will see the conflict name, not the original name. 
*/ g_free (remote->name); remote->name = conflict_name; remote->name_len = strlen (remote->name); *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head)); *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote)); opt->conflict = TRUE; } } else if (base && !head && remote) { if (strcmp (base->id, remote->id) != 0) { if (dents[1] != NULL) { /* D/F conflict: * Head replaces file with dir, while remote change the file. */ seaf_debug ("%s%s: DFC, file -> dir, file\n", basedir, remote->name); conflict_name = merge_conflict_filename(store_id, version, opt, basedir, remote->name); if (!conflict_name) return -1; /* Change the name of remote, keep dir name in head unchanged. */ g_free (remote->name); remote->name = conflict_name; remote->name_len = strlen (remote->name); *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote)); opt->conflict = TRUE; } else { /* Deleted in head and changed in remote. */ seaf_debug ("%s%s: deleted in head and changed in remote\n", basedir, remote->name); /* Keep version of remote. */ *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote)); } } else { /* If base and remote match, the file should not be added to * the merge result. */ seaf_debug ("%s%s: file deleted in head, unchanged in remote\n", basedir, remote->name); } } else if (base && head && !remote) { if (strcmp (base->id, head->id) != 0) { if (dents[2] != NULL) { /* D/F conflict: * Remote replaces file with dir, while head change the file. */ seaf_debug ("%s%s: DFC, file -> file, dir\n", basedir, head->name); /* We use remote head commit author name as conflict * suffix of a dir. */ conflict_name = merge_conflict_dirname (store_id, version, opt, basedir, dents[2]->name); if (!conflict_name) return -1; /* Change remote dir name to conflict name in place. 
*/ g_free (dents[2]->name); dents[2]->name = conflict_name; dents[2]->name_len = strlen (dents[2]->name); *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head)); opt->conflict = TRUE; } else { /* Deleted in remote and changed in head. */ seaf_debug ("%s%s: deleted in remote and changed in head\n", basedir, head->name); /* Keep version of remote. */ *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head)); } } else { /* If base and head match, the file should not be added to * the merge result. */ seaf_debug ("%s%s: file deleted in remote, unchanged in head\n", basedir, head->name); } } else if (!base && !head && remote) { if (!dents[1]) { /* Added in remote. */ seaf_debug ("%s%s: added in remote\n", basedir, remote->name); *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote)); } else if (dents[0] != NULL && strcmp(dents[0]->id, dents[1]->id) == 0) { /* Contents in the dir is not changed. * The dir will be deleted in merge_directories(). */ seaf_debug ("%s%s: dir in head will be replaced by file in remote\n", basedir, remote->name); *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote)); } else { /* D/F conflict: * Contents of the dir is changed in head, while * remote replace the dir with a file. * * Or, head adds a new dir, while remote adds a new file, * with the same name. */ seaf_debug ("%s%s: DFC, dir -> dir, file\n", basedir, remote->name); conflict_name = merge_conflict_filename(store_id, version, opt, basedir, remote->name); if (!conflict_name) return -1; g_free (remote->name); remote->name = conflict_name; remote->name_len = strlen (remote->name); *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote)); opt->conflict = TRUE; } } else if (!base && head && !remote) { if (!dents[2]) { /* Added in remote. 
*/ seaf_debug ("%s%s: added in head\n", basedir, head->name); *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head)); } else if (dents[0] != NULL && strcmp(dents[0]->id, dents[2]->id) == 0) { /* Contents in the dir is not changed. * The dir will be deleted in merge_directories(). */ seaf_debug ("%s%s: dir in remote will be replaced by file in head\n", basedir, head->name); *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head)); } else { /* D/F conflict: * Contents of the dir is changed in remote, while * head replace the dir with a file. * * Or, remote adds a new dir, while head adds a new file, * with the same name. */ seaf_debug ("%s%s: DFC, dir -> file, dir\n", basedir, head->name); conflict_name = merge_conflict_dirname (store_id, version, opt, basedir, dents[2]->name); if (!conflict_name) return -1; g_free (dents[2]->name); dents[2]->name = conflict_name; dents[2]->name_len = strlen (dents[2]->name); *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head)); opt->conflict = TRUE; } } else if (base && !head && !remote) { /* Don't need to add anything to dents_out. */ seaf_debug ("%s%s: deleted in head and remote\n", basedir, base->name); } return 0; } static int merge_entries (const char *store_id, int version, int n, SeafDirent *dents[], const char *basedir, GList **dents_out, MergeOptions *opt) { /* If we're running 2-way merge, it means merge files base on head and remote. */ if (n == 2) return twoway_merge (store_id, version, basedir, dents, dents_out, opt); /* Otherwise, we're doing a real 3-way merge of the trees. * It means merge files and handle any conflicts. 
*/ return threeway_merge (store_id, version, dents, basedir, dents_out, opt); } static int merge_directories (const char *store_id, int version, int n, SeafDirent *dents[], const char *basedir, GList **dents_out, MergeOptions *opt) { SeafDir *dir; SeafDir *sub_dirs[3]; char *dirname = NULL; char *new_basedir; int ret = 0; int dir_mask = 0, i; SeafDirent *merged_dent; for (i = 0; i < n; ++i) { if (dents[i] && S_ISDIR(dents[i]->mode)) dir_mask |= 1 << i; } seaf_debug ("dir_mask = %d\n", dir_mask); if (n == 3) { switch (dir_mask) { case 0: g_return_val_if_reached (-1); case 1: /* head and remote are not dirs, nothing to merge. */ seaf_debug ("%s%s: no dir, no need to merge\n", basedir, dents[0]->name); return 0; case 2: /* only head is dir, add to result directly, no need to merge. */ seaf_debug ("%s%s: only head is dir\n", basedir, dents[1]->name); *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[1])); return 0; case 3: if (strcmp (dents[0]->id, dents[1]->id) == 0) { /* Base and head are the same, but deleted in remote. */ seaf_debug ("%s%s: dir deleted in remote\n", basedir, dents[0]->name); return 0; } seaf_debug ("%s%s: dir changed in head but deleted in remote\n", basedir, dents[1]->name); break; case 4: /* only remote is dir, add to result directly, no need to merge. */ seaf_debug ("%s%s: only remote is dir\n", basedir, dents[2]->name); *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[2])); return 0; case 5: if (strcmp (dents[0]->id, dents[2]->id) == 0) { /* Base and remote are the same, but deleted in head. */ seaf_debug ("%s%s: dir deleted in head\n", basedir, dents[0]->name); return 0; } seaf_debug ("%s%s: dir changed in remote but deleted in head\n", basedir, dents[2]->name); break; case 6: case 7: if (strcmp (dents[1]->id, dents[2]->id) == 0) { /* Head and remote match. 
*/
            seaf_debug ("%s%s: dir is the same in head and remote\n",
                        basedir, dents[1]->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[1]));
            return 0;
        } else if (dents[0] && strcmp(dents[0]->id, dents[1]->id) == 0) {
            /* Head dir identical to base: only remote changed, take remote. */
            seaf_debug ("%s%s: dir changed in remote but unchanged in head\n",
                        basedir, dents[1]->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[2]));
            return 0;
        } else if (dents[0] && strcmp(dents[0]->id, dents[2]->id) == 0) {
            /* Remote dir identical to base: only head changed, take head. */
            seaf_debug ("%s%s: dir changed in head but unchanged in remote\n",
                        basedir, dents[1]->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[1]));
            return 0;
        }
        /* Both sides changed this dir: fall through to the recursive merge
         * below. */
        seaf_debug ("%s%s: dir is changed in both head and remote, "
                    "merge recursively\n", basedir, dents[1]->name);
        break;
    default:
        g_return_val_if_reached (-1);
    }
    } else if (n == 2) {
        switch (dir_mask) {
        case 0:
            g_return_val_if_reached (-1);
        case 1:
            /*head is dir, remote is not dir*/
            seaf_debug ("%s%s: only head is dir\n", basedir, dents[0]->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[0]));
            return 0;
        case 2:
            /*head is not dir, remote is dir*/
            seaf_debug ("%s%s: only remote is dir\n", basedir, dents[1]->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[1]));
            return 0;
        case 3:
            if (strcmp (dents[0]->id, dents[1]->id) == 0) {
                seaf_debug ("%s%s: dir is the same in head and remote\n",
                            basedir, dents[0]->name);
                *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[1]));
                return 0;
            }
            seaf_debug ("%s%s: dir is changed in head and remote, merge recursively\n",
                        basedir, dents[0]->name);
            break;
        default:
            g_return_val_if_reached (-1);
        }
    }

    /* Load every entry that is a directory so we can recurse one level down. */
    memset (sub_dirs, 0, sizeof(sub_dirs[0])*n);
    for (i = 0; i < n; ++i) {
        if (dents[i] != NULL && S_ISDIR(dents[i]->mode)) {
            dir = seaf_fs_manager_get_seafdir (seaf->fs_mgr,
                                               store_id, version,
                                               dents[i]->id);
            if (!dir) {
                seaf_warning ("Failed to find dir %s:%s.\n",
                              store_id, dents[i]->id);
                ret = -1;
                goto free_sub_dirs;
            }
            opt->visit_dirs++;
            sub_dirs[i] = dir;
            dirname = dents[i]->name;
        }
    }

    new_basedir = g_strconcat (basedir, dirname, "/", NULL);
    ret = merge_trees_recursive (store_id, version, n, sub_dirs, new_basedir, opt);
    g_free (new_basedir);

    /* Emit a dirent pointing at the merged subtree root computed by the
     * recursive call above (opt->merged_tree_root). */
    if (n == 3) {
        if (dir_mask == 3 || dir_mask == 6 || dir_mask == 7) {
            merged_dent = seaf_dirent_dup (dents[1]);
            memcpy (merged_dent->id, opt->merged_tree_root, 40);
            *dents_out = g_list_prepend (*dents_out, merged_dent);
        } else if (dir_mask == 5) {
            merged_dent = seaf_dirent_dup (dents[2]);
            memcpy (merged_dent->id, opt->merged_tree_root, 40);
            *dents_out = g_list_prepend (*dents_out, merged_dent);
        }
    } else if (n == 2) {
        if (dir_mask == 3) {
            merged_dent = seaf_dirent_dup (dents[1]);
            memcpy (merged_dent->id, opt->merged_tree_root, 40);
            *dents_out = g_list_prepend (*dents_out, merged_dent);
        }
    }

free_sub_dirs:
    for (i = 0; i < n; ++i)
        seaf_dir_free (sub_dirs[i]);
    return ret;
}

/* GCompareFunc for g_list_sort: note the swapped arguments — this orders
 * dirents in *descending* name order, matching the "largest name first"
 * traversal in merge_trees_recursive below. */
static gint
compare_dirents (gconstpointer a, gconstpointer b)
{
    const SeafDirent *denta = a, *dentb = b;

    return strcmp (dentb->name, denta->name);
}

/* Merge @n trees (2-way or 3-way) level by level. Walks the entry lists of
 * all trees in lock-step by name, merges plain entries via merge_entries()
 * and recurses into directories via merge_directories(); the resulting
 * merged tree id is written into opt->merged_tree_root. */
static int
merge_trees_recursive (const char *store_id, int version,
                       int n, SeafDir *trees[],
                       const char *basedir,
                       MergeOptions *opt)
{
    GList *ptrs[3];
    SeafDirent *dents[3];
    int i;
    SeafDirent *dent;
    char *first_name;
    gboolean done;
    int ret = 0;
    SeafDir *merged_tree;
    GList *merged_dents = NULL;

    for (i = 0; i < n; ++i) {
        if (trees[i])
            ptrs[i] = trees[i]->entries;
        else
            ptrs[i] = NULL;
    }

    while (1) {
        first_name = NULL;
        memset (dents, 0, sizeof(dents[0])*n);
        done = TRUE;

        /* Find the "largest" name, assuming dirents are sorted. */
        for (i = 0; i < n; ++i) {
            if (ptrs[i] != NULL) {
                done = FALSE;
                dent = ptrs[i]->data;
                if (!first_name)
                    first_name = dent->name;
                else if (strcmp(dent->name, first_name) > 0)
                    first_name = dent->name;
            }
        }
        if (done)
            break;

        /*
         * Setup dir entries for all names that equal to first_name
         */
        int n_files = 0, n_dirs = 0;
        for (i = 0; i < n; ++i) {
            if (ptrs[i] != NULL) {
                dent = ptrs[i]->data;
                if (strcmp(first_name, dent->name) == 0) {
                    if (S_ISREG(dent->mode))
                        ++n_files;
                    else if (S_ISDIR(dent->mode))
                        ++n_dirs;
                    dents[i] = dent;
                    ptrs[i] = ptrs[i]->next;
                }
            }
        }

        /* Merge entries of this level. */
        if (n_files > 0) {
            ret = merge_entries (store_id, version, n, dents, basedir,
                                 &merged_dents, opt);
            if (ret < 0)
                return ret;
        }

        /* Recurse into sub level. */
        if (n_dirs > 0) {
            ret = merge_directories (store_id, version, n, dents, basedir,
                                     &merged_dents, opt);
            if (ret < 0)
                return ret;
        }
    }

    if (n == 3) {
        merged_dents = g_list_sort (merged_dents, compare_dirents);
        merged_tree = seaf_dir_new (NULL, merged_dents,
                                    dir_version_from_repo_version(version));
        memcpy (opt->merged_tree_root, merged_tree->dir_id, 40);
        /* Only save the merged tree when it differs from both inputs;
         * an identical tree already exists in the object store. */
        if ((trees[1] && strcmp (trees[1]->dir_id, merged_tree->dir_id) == 0) ||
            (trees[2] && strcmp (trees[2]->dir_id, merged_tree->dir_id) == 0)) {
            seaf_dir_free (merged_tree);
        } else {
            ret = seaf_dir_save (seaf->fs_mgr, store_id, version, merged_tree);
            seaf_dir_free (merged_tree);
            if (ret < 0) {
                seaf_warning ("Failed to save merged tree %s:%s.\n",
                              store_id, basedir);
            }
        }
    } else if (n == 2) {
        merged_dents = g_list_sort (merged_dents, compare_dirents);
        merged_tree = seaf_dir_new (NULL, merged_dents,
                                    dir_version_from_repo_version(version));
        memcpy (opt->merged_tree_root, merged_tree->dir_id, 40);
        if ((trees[0] && strcmp (trees[0]->dir_id, merged_tree->dir_id) == 0) ||
            (trees[1] && strcmp (trees[1]->dir_id, merged_tree->dir_id) == 0)) {
            seaf_dir_free (merged_tree);
        } else {
            ret = seaf_dir_save (seaf->fs_mgr, store_id, version, merged_tree);
            seaf_dir_free (merged_tree);
            if (ret < 0) {
                seaf_warning ("Failed to save merged tree %s:%s.\n",
                              store_id, basedir);
            }
        }
    }

    return ret;
}

/* Public entry point: merge @n root trees (2-way or 3-way). Results are
 * reported through @opt (merged_tree_root, conflict, visit_dirs). */
int
seaf_merge_trees (const char *store_id, int version,
                  int n, const char *roots[], MergeOptions *opt)
{
    SeafDir **trees, *root;
    int i, ret;

    g_return_val_if_fail (n == 2 || n == 3, -1);

    opt->email_to_nickname = g_hash_table_new_full(g_str_hash, g_str_equal,
                                                   g_free, g_free);

    trees = g_new0 (SeafDir *, n);

    for (i = 0; i < n; ++i) {
        root = seaf_fs_manager_get_seafdir (seaf->fs_mgr, store_id, version, roots[i]);
        if (!root) {
            seaf_warning ("Failed to find dir %s:%s.\n", store_id, roots[i]);
            /* NOTE(review): trees fetched in earlier iterations and
             * opt->email_to_nickname are not freed on this path — leak. */
            g_free (trees);
            return -1;
        }
        trees[i] = root;
    }

    ret = merge_trees_recursive (store_id, version, n, trees, "", opt);

    for (i = 0; i < n; ++i)
        seaf_dir_free (trees[i]);
    g_free (trees);

    g_hash_table_destroy (opt->email_to_nickname);

    return ret;
}

================================================ FILE: common/merge-new.h ================================================
#ifndef MERGE_NEW_H
#define MERGE_NEW_H

#include "common.h"
#include "fs-mgr.h"

struct MergeOptions;

/* Per-entry merge hook invoked for each set of same-named dirents. */
typedef int (*MergeCallback) (const char *basedir,
                              SeafDirent *dirents[],
                              struct MergeOptions *opt);

typedef struct MergeOptions {
    int n_ways;                 /* only 2 and 3 way merges are supported. */

    MergeCallback callback;
    void * data;

    /* options only used in 3-way merge.
*/
    char remote_repo_id[37];        /* uuid string + NUL */
    char remote_head[41];           /* commit id string + NUL */
    gboolean do_merge;              /* really merge the contents
                                     * and handle conflicts */
    char merged_tree_root[41];      /* merge result */
    int visit_dirs;
    gboolean conflict;
    GHashTable *email_to_nickname;
} MergeOptions;

int
seaf_merge_trees (const char *store_id, int version,
                  int n, const char *roots[], MergeOptions *opt);

#endif

================================================ FILE: common/mq-mgr.c ================================================
#include "common.h"
#include "log.h"
#include "utils.h"
#include "mq-mgr.h"

/* In-process message queue manager: maps a channel name to a GAsyncQueue
 * of json_t messages. */
typedef struct SeafMqManagerPriv {
    // chan <-> async_queue
    GHashTable *chans;
} SeafMqManagerPriv;

SeafMqManager *
seaf_mq_manager_new ()
{
    SeafMqManager *mgr = g_new0 (SeafMqManager, 1);

    mgr->priv = g_new0 (SeafMqManagerPriv, 1);
    /* Dropping a channel unrefs its queue; queued messages are released
     * via the queue's json_decref item destructor (set below). */
    mgr->priv->chans = g_hash_table_new_full (g_str_hash, g_str_equal,
                                              (GDestroyNotify)g_free,
                                              (GDestroyNotify)g_async_queue_unref);

    return mgr;
}

/* Create (or replace) the queue registered for @channel. */
static GAsyncQueue *
seaf_mq_manager_channel_new (SeafMqManager *mgr, const char *channel)
{
    GAsyncQueue *async_queue = NULL;

    async_queue = g_async_queue_new_full ((GDestroyNotify)json_decref);
    g_hash_table_replace (mgr->priv->chans, g_strdup (channel), async_queue);

    return async_queue;
}

/* Publish @content on @channel, creating the channel on first use.
 * NOTE(review): the chans hash table itself is not protected by a lock
 * (only GAsyncQueue is thread-safe) — presumably channel creation happens
 * from a single thread; verify callers. */
int
seaf_mq_manager_publish_event (SeafMqManager *mgr, const char *channel,
                               const char *content)
{
    int ret = 0;

    if (!channel || !content) {
        seaf_warning ("type and content should not be NULL.\n");
        return -1;
    }

    GAsyncQueue *async_queue = g_hash_table_lookup (mgr->priv->chans, channel);
    if (!async_queue) {
        async_queue = seaf_mq_manager_channel_new(mgr, channel);
    }
    if (!async_queue) {
        seaf_warning("%s channel creation failed.\n", channel);
        return -1;
    }

    /* Message is a json object {"content": ..., "ctime": <epoch>}; the
     * queue takes ownership. */
    json_t *msg = json_object();
    json_object_set_new (msg, "content", json_string(content));
    json_object_set_new (msg, "ctime", json_integer(time(NULL)));

    g_async_queue_push (async_queue, msg);

    return ret;
}

/* Non-blocking pop; returns NULL when the channel is unknown or empty.
 * Caller takes ownership of the returned json_t. */
json_t *
seaf_mq_manager_pop_event (SeafMqManager *mgr, const char *channel)
{
    GAsyncQueue *async_queue = g_hash_table_lookup (mgr->priv->chans, channel);
    if (!async_queue)
        return NULL;

    return g_async_queue_try_pop (async_queue);
}

================================================ FILE: common/mq-mgr.h ================================================
#ifndef SEAF_MQ_MANAGER_H
#define SEAF_MQ_MANAGER_H

/* NOTE(review): the include target was lost in extraction; json_t below
 * needs <jansson.h> — confirm against upstream. */
#include

#define SEAFILE_SERVER_CHANNEL_EVENT "seaf_server.event"
#define SEAFILE_SERVER_CHANNEL_STATS "seaf_server.stats"

struct SeafMqManagerPriv;

typedef struct SeafMqManager {
    struct SeafMqManagerPriv *priv;
} SeafMqManager;

SeafMqManager *
seaf_mq_manager_new ();

int
seaf_mq_manager_publish_event (SeafMqManager *mgr, const char *channel,
                               const char *content);

json_t *
seaf_mq_manager_pop_event (SeafMqManager *mgr, const char *channel);

#endif

================================================ FILE: common/obj-backend-fs.c ================================================
#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x500
#endif

#include "common.h"
#include "utils.h"
#include "obj-backend.h"

/* NOTE(review): the system header names below were lost in extraction —
 * confirm against upstream. */
#ifndef WIN32
#include
#include
#include
#endif

#ifdef WIN32
#include
#include
#endif

#define DEBUG_FLAG SEAFILE_DEBUG_OTHER
#include "log.h"

/* Filesystem object backend. Objects are stored under
 * <obj_dir>/<repo_id>/<id[0:2]>/<id[2:]>. */
typedef struct FsPriv {
    char *obj_dir;
    int   dir_len;
} FsPriv;

/* Build the on-disk path for @obj_id into @path (caller-provided buffer of
 * SEAF_PATH_MAX bytes). The first two hex chars of the id become a
 * subdirectory; copying 41-2 bytes takes the remaining 38 chars plus the
 * terminating NUL of a 40-char id. */
static void
id_to_path (FsPriv *priv, const char *obj_id, char path[],
            const char *repo_id, int version)
{
    char *pos = path;
    int n;

#if defined MIGRATION || defined SEAFILE_CLIENT
    if (version > 0) {
        n = snprintf (path, SEAF_PATH_MAX, "%s/%s/", priv->obj_dir, repo_id);
        pos += n;
    }
#else
    n = snprintf (path, SEAF_PATH_MAX, "%s/%s/", priv->obj_dir, repo_id);
    pos += n;
#endif

    memcpy (pos, obj_id, 2);
    pos[2] = '/';
    pos += 3;
    memcpy (pos, obj_id + 2, 41 - 2);
}

/* Read the whole object into a newly allocated buffer (*data, *len).
 * Returns 0 on success, -1 on failure. */
static int
obj_backend_fs_read (ObjBackend *bend,
                     const char *repo_id,
                     int version,
                     const char *obj_id,
                     void **data,
                     int *len)
{
    char path[SEAF_PATH_MAX];
    gsize tmp_len;
    GError *error = NULL;

    id_to_path (bend->priv, obj_id, path, repo_id, version);

    /* seaf_debug ("object path: %s\n", path); */

    g_file_get_contents (path, (gchar**)data,
&tmp_len, &error); if (error) { #ifdef MIGRATION g_clear_error (&error); id_to_path (bend->priv, obj_id, path, repo_id, 1); g_file_get_contents (path, (gchar**)data, &tmp_len, &error); if (error) { seaf_debug ("[obj backend] Failed to read object %s: %s.\n", obj_id, error->message); g_clear_error (&error); return -1; } #else seaf_debug ("[obj backend] Failed to read object %s: %s.\n", obj_id, error->message); g_clear_error (&error); return -1; #endif } *len = (int)tmp_len; return 0; } /* * Flush operating system and disk caches for @fd. */ static int fsync_obj_contents (int fd) { #ifdef __linux__ /* Some file systems may not support fsync(). * In this case, just skip the error. */ if (fsync (fd) < 0) { if (errno == EINVAL) return 0; else { seaf_warning ("Failed to fsync: %s.\n", strerror(errno)); return -1; } } return 0; #endif #ifdef __APPLE__ /* OS X: fcntl() is required to flush disk cache, fsync() only * flushes operating system cache. */ if (fcntl (fd, F_FULLFSYNC, NULL) < 0) { seaf_warning ("Failed to fsync: %s.\n", strerror(errno)); return -1; } return 0; #endif #ifdef WIN32 HANDLE handle; handle = (HANDLE)_get_osfhandle (fd); if (handle == INVALID_HANDLE_VALUE) { seaf_warning ("Failed to get handle from fd.\n"); return -1; } if (!FlushFileBuffers (handle)) { seaf_warning ("FlushFileBuffer() failed: %lu.\n", GetLastError()); return -1; } return 0; #endif } /* * Rename file from @tmp_path to @obj_path. * This also makes sure the changes to @obj_path's parent folder * is flushed to disk. 
*/ static int rename_and_sync (const char *tmp_path, const char *obj_path) { #ifdef __linux__ char *parent_dir; int ret = 0; if (rename (tmp_path, obj_path) < 0) { seaf_warning ("Failed to rename from %s to %s: %s.\n", tmp_path, obj_path, strerror(errno)); return -1; } parent_dir = g_path_get_dirname (obj_path); int dir_fd = open (parent_dir, O_RDONLY); if (dir_fd < 0) { seaf_warning ("Failed to open dir %s: %s.\n", parent_dir, strerror(errno)); goto out; } /* Some file systems don't support fsyncing a directory. Just ignore the error. */ if (fsync (dir_fd) < 0) { if (errno != EINVAL) { seaf_warning ("Failed to fsync dir %s: %s.\n", parent_dir, strerror(errno)); ret = -1; } goto out; } out: g_free (parent_dir); if (dir_fd >= 0) close (dir_fd); return ret; #endif #ifdef __APPLE__ /* * OS X garantees an existence of obj_path always exists, * even when the system crashes. */ if (rename (tmp_path, obj_path) < 0) { seaf_warning ("Failed to rename from %s to %s: %s.\n", tmp_path, obj_path, strerror(errno)); return -1; } return 0; #endif #ifdef WIN32 wchar_t *w_tmp_path = g_utf8_to_utf16 (tmp_path, -1, NULL, NULL, NULL); wchar_t *w_obj_path = g_utf8_to_utf16 (obj_path, -1, NULL, NULL, NULL); int ret = 0; if (!MoveFileExW (w_tmp_path, w_obj_path, MOVEFILE_REPLACE_EXISTING | MOVEFILE_WRITE_THROUGH)) { seaf_warning ("MoveFilExW failed: %lu.\n", GetLastError()); ret = -1; goto out; } out: g_free (w_tmp_path); g_free (w_obj_path); return ret; #endif } static int save_obj_contents (const char *path, const void *data, int len, gboolean need_sync) { char tmp_path[SEAF_PATH_MAX]; int fd; snprintf (tmp_path, SEAF_PATH_MAX, "%s.XXXXXX", path); fd = g_mkstemp (tmp_path); if (fd < 0) { seaf_warning ("[obj backend] Failed to open tmp file %s: %s.\n", tmp_path, strerror(errno)); return -1; } if (writen (fd, data, len) < 0) { seaf_warning ("[obj backend] Failed to write obj %s: %s.\n", tmp_path, strerror(errno)); return -1; } if (need_sync && fsync_obj_contents (fd) < 0) return -1; /* 
Close may return error, especially in NFS. */ if (close (fd) < 0) { seaf_warning ("[obj backend Failed close obj %s: %s.\n", tmp_path, strerror(errno)); return -1; } if (need_sync) { if (rename_and_sync (tmp_path, path) < 0) return -1; } else { if (g_rename (tmp_path, path) < 0) { seaf_warning ("[obj backend] Failed to rename %s: %s.\n", path, strerror(errno)); return -1; } } return 0; } static int create_parent_path (const char *path) { char *dir = g_path_get_dirname (path); if (!dir) return -1; if (g_file_test (dir, G_FILE_TEST_EXISTS)) { g_free (dir); return 0; } if (g_mkdir_with_parents (dir, 0777) < 0) { seaf_warning ("Failed to create object parent path %s: %s.\n", dir, strerror(errno)); g_free (dir); return -1; } g_free (dir); return 0; } static int obj_backend_fs_write (ObjBackend *bend, const char *repo_id, int version, const char *obj_id, void *data, int len, gboolean need_sync) { char path[SEAF_PATH_MAX]; id_to_path (bend->priv, obj_id, path, repo_id, version); /* GTimeVal s, e; */ /* g_get_current_time (&s); */ if (create_parent_path (path) < 0) { seaf_warning ("[obj backend] Failed to create path for obj %s:%s.\n", repo_id, obj_id); return -1; } if (save_obj_contents (path, data, len, need_sync) < 0) { seaf_warning ("[obj backend] Failed to write obj %s:%s.\n", repo_id, obj_id); return -1; } /* g_get_current_time (&e); */ /* seaf_message ("write obj time: %ldus.\n", */ /* ((e.tv_sec*1000000+e.tv_usec) - (s.tv_sec*1000000+s.tv_usec))); */ return 0; } static gboolean obj_backend_fs_exists (ObjBackend *bend, const char *repo_id, int version, const char *obj_id) { char path[SEAF_PATH_MAX]; SeafStat st; id_to_path (bend->priv, obj_id, path, repo_id, version); if (seaf_stat (path, &st) == 0) return TRUE; return FALSE; } static void obj_backend_fs_delete (ObjBackend *bend, const char *repo_id, int version, const char *obj_id) { char path[SEAF_PATH_MAX]; id_to_path (bend->priv, obj_id, path, repo_id, version); g_unlink (path); } static int 
obj_backend_fs_foreach_obj (ObjBackend *bend,
                            const char *repo_id,
                            int version,
                            SeafObjFunc process,
                            void *user_data)
{
    FsPriv *priv = bend->priv;
    char *obj_dir = NULL;
    int dir_len;
    GDir *dir1 = NULL, *dir2;
    const char *dname1, *dname2;
    char obj_id[128];
    char path[SEAF_PATH_MAX], *pos;
    int ret = 0;

#if defined MIGRATION || defined SEAFILE_CLIENT
    /* NOTE(review): in MIGRATION/SEAFILE_CLIENT builds with version == 0,
     * obj_dir stays NULL and strlen(obj_dir) below crashes — confirm that
     * version 0 never reaches this path in those builds. */
    if (version > 0)
        obj_dir = g_build_filename (priv->obj_dir, repo_id, NULL);
#else
    obj_dir = g_build_filename (priv->obj_dir, repo_id, NULL);
#endif
    dir_len = strlen (obj_dir);

    dir1 = g_dir_open (obj_dir, 0, NULL);
    if (!dir1) {
        goto out;
    }

    memcpy (path, obj_dir, dir_len);
    pos = path + dir_len;

    /* Two-level layout: <2-char prefix dir>/<rest of id>. The object id is
     * the concatenation of the two directory entry names. */
    while ((dname1 = g_dir_read_name(dir1)) != NULL) {
        snprintf (pos, sizeof(path) - dir_len, "/%s", dname1);

        dir2 = g_dir_open (path, 0, NULL);
        if (!dir2) {
            seaf_warning ("Failed to open object dir %s.\n", path);
            continue;
        }

        while ((dname2 = g_dir_read_name(dir2)) != NULL) {
            snprintf (obj_id, sizeof(obj_id), "%s%s", dname1, dname2);
            /* A FALSE return from the callback aborts the traversal. */
            if (!process (repo_id, version, obj_id, user_data)) {
                g_dir_close (dir2);
                goto out;
            }
        }
        g_dir_close (dir2);
    }

out:
    if (dir1)
        g_dir_close (dir1);
    g_free (obj_dir);
    return ret;
}

/* Copy an object between repos by hard-linking the underlying file;
 * a no-op when the destination already exists. */
static int
obj_backend_fs_copy (ObjBackend *bend,
                     const char *src_repo_id,
                     int src_version,
                     const char *dst_repo_id,
                     int dst_version,
                     const char *obj_id)
{
    char src_path[SEAF_PATH_MAX];
    char dst_path[SEAF_PATH_MAX];

    id_to_path (bend->priv, obj_id, src_path, src_repo_id, src_version);
    id_to_path (bend->priv, obj_id, dst_path, dst_repo_id, dst_version);

    if (g_file_test (dst_path, G_FILE_TEST_EXISTS))
        return 0;

    if (create_parent_path (dst_path) < 0) {
        seaf_warning ("Failed to create dst path %s for obj %s.\n",
                      dst_path, obj_id);
        return -1;
    }

#ifdef WIN32
    if (!CreateHardLink (dst_path, src_path, NULL)) {
        seaf_warning ("Failed to link %s to %s: %lu.\n",
                      src_path, dst_path, GetLastError());
        return -1;
    }
    return 0;
#else
    int ret = link (src_path, dst_path);
    /* EEXIST means a concurrent copy won the race: no warning is logged,
     * but NOTE(review): ret is still -1 and is returned to the caller —
     * verify callers treat this as success or fix to return 0. */
    if (ret < 0 && errno != EEXIST) {
        seaf_warning ("Failed to link %s to %s: %s.\n",
                      src_path, dst_path, strerror(errno));
        return -1;
    }
    return ret;
#endif
}

/* Delete every object of @store_id and remove its two-level directory
 * tree. Missing store dir is treated as success. */
static int
obj_backend_fs_remove_store (ObjBackend *bend, const char *store_id)
{
    FsPriv *priv = bend->priv;
    char *obj_dir = NULL;
    GDir *dir1, *dir2;
    const char *dname1, *dname2;
    char *path1, *path2;

    obj_dir = g_build_filename (priv->obj_dir, store_id, NULL);

    dir1 = g_dir_open (obj_dir, 0, NULL);
    if (!dir1) {
        g_free (obj_dir);
        return 0;
    }

    while ((dname1 = g_dir_read_name(dir1)) != NULL) {
        path1 = g_build_filename (obj_dir, dname1, NULL);

        dir2 = g_dir_open (path1, 0, NULL);
        if (!dir2) {
            seaf_warning ("Failed to open obj dir %s.\n", path1);
            g_dir_close (dir1);
            g_free (path1);
            g_free (obj_dir);
            return -1;
        }

        while ((dname2 = g_dir_read_name(dir2)) != NULL) {
            path2 = g_build_filename (path1, dname2, NULL);
            g_unlink (path2);
            g_free (path2);
        }
        g_dir_close (dir2);

        g_rmdir (path1);
        g_free (path1);
    }

    g_dir_close (dir1);
    g_rmdir (obj_dir);
    g_free (obj_dir);

    return 0;
}

/* Construct the filesystem backend rooted at <seaf_dir>/storage/<obj_type>,
 * creating the directory if needed. Returns NULL on failure. */
ObjBackend *
obj_backend_fs_new (const char *seaf_dir, const char *obj_type)
{
    ObjBackend *bend;
    FsPriv *priv;

    bend = g_new0(ObjBackend, 1);
    priv = g_new0(FsPriv, 1);
    bend->priv = priv;

    priv->obj_dir = g_build_filename (seaf_dir, "storage", obj_type, NULL);
    priv->dir_len = strlen (priv->obj_dir);

    if (g_mkdir_with_parents (priv->obj_dir, 0777) < 0) {
        seaf_warning ("[Obj Backend] Objects dir %s does not exist and"
                      " is unable to create\n", priv->obj_dir);
        goto onerror;
    }

    bend->read = obj_backend_fs_read;
    bend->write = obj_backend_fs_write;
    bend->exists = obj_backend_fs_exists;
    bend->delete = obj_backend_fs_delete;
    bend->foreach_obj = obj_backend_fs_foreach_obj;
    bend->copy = obj_backend_fs_copy;
    bend->remove_store = obj_backend_fs_remove_store;

    return bend;

onerror:
    g_free (priv->obj_dir);
    g_free (priv);
    g_free (bend);

    return NULL;
}

================================================ FILE: common/obj-backend-riak.c ================================================
#include "common.h"
#include "log.h"
#include "obj-backend.h"

#ifdef RIAK_BACKEND
#include "riak-client.h"
/* NOTE(review): system header name lost in extraction (pthread.h is
 * needed for the mutex below) — confirm against upstream. */
#include

/* Riak object backend. Connections are pooled in a mutex-protected queue. */
typedef struct RiakPriv {
    const char *host;
    const char *port;
    const char *bucket;
    int n_write;

    GQueue *conn_pool;
    pthread_mutex_t lock;
} RiakPriv;

/* Take a pooled connection, creating a fresh one when the pool is empty. */
static SeafRiakClient *
get_connection (RiakPriv *priv)
{
    SeafRiakClient *connection;

    pthread_mutex_lock (&priv->lock);
    connection = g_queue_pop_head (priv->conn_pool);
    if (!connection)
        connection = seaf_riak_client_new (priv->host, priv->port);
    pthread_mutex_unlock (&priv->lock);

    return connection;
}

/* Return a connection to the pool for reuse. */
static void
return_connection (RiakPriv *priv, SeafRiakClient *connection)
{
    pthread_mutex_lock (&priv->lock);
    g_queue_push_tail (priv->conn_pool, connection);
    pthread_mutex_unlock (&priv->lock);
}

static int
obj_backend_riak_read (ObjBackend *bend,
                       const char *obj_id,
                       void **data,
                       int *len)
{
    SeafRiakClient *conn = get_connection (bend->priv);
    RiakPriv *priv = bend->priv;
    int ret;

    ret = seaf_riak_client_get (conn, priv->bucket, obj_id, data, len);
    return_connection (priv, conn);

    return ret;
}

static int
obj_backend_riak_write (ObjBackend *bend,
                        const char *obj_id,
                        void *data,
                        int len)
{
    SeafRiakClient *conn = get_connection (bend->priv);
    RiakPriv *priv = bend->priv;
    int ret;

    ret = seaf_riak_client_put (conn, priv->bucket, obj_id, data, len,
                                priv->n_write);
    return_connection (priv, conn);

    return ret;
}

static gboolean
obj_backend_riak_exists (ObjBackend *bend, const char *obj_id)
{
    SeafRiakClient *conn = get_connection (bend->priv);
    RiakPriv *priv = bend->priv;
    gboolean ret;

    ret = seaf_riak_client_query (conn, priv->bucket, obj_id);
    return_connection (priv, conn);

    return ret;
}

static void
obj_backend_riak_delete (ObjBackend *bend, const char *obj_id)
{
    SeafRiakClient *conn = get_connection (bend->priv);
    RiakPriv *priv = bend->priv;

    seaf_riak_client_delete (conn, priv->bucket, obj_id, priv->n_write);
    return_connection (priv, conn);
}

/* NOTE(review): the callback signatures above lack the repo_id/version
 * parameters declared in struct ObjBackend (obj-backend.h); this backend
 * looks out of date relative to the interface — confirm it is dead code
 * behind RIAK_BACKEND. Also: host/port/bucket are declared const char *
 * but assigned g_strdup'd memory that is never freed. */
ObjBackend *
obj_backend_riak_new (const char *host, const char *port,
                      const char *bucket, const char *write_policy)
{
    ObjBackend *bend;
    RiakPriv *priv;

    bend = g_new0(ObjBackend, 1);
    priv = g_new0(RiakPriv, 1);
    bend->priv = priv;

    priv->host = g_strdup (host);
    priv->port = g_strdup (port);
    priv->bucket = g_strdup (bucket);

    if (strcmp (write_policy, "quorum") == 0)
        priv->n_write = RIAK_QUORUM;
    else if (strcmp (write_policy, "all") == 0)
        priv->n_write = RIAK_ALL;
    else
        g_return_val_if_reached (NULL);

    priv->conn_pool = g_queue_new ();
    pthread_mutex_init (&priv->lock, NULL);

    bend->read = obj_backend_riak_read;
    bend->write = obj_backend_riak_write;
    bend->exists = obj_backend_riak_exists;
    bend->delete = obj_backend_riak_delete;

    return bend;
}

#else

/* Stub used when the backend is compiled out. */
ObjBackend *
obj_backend_riak_new (const char *host, const char *port,
                      const char *bucket, const char *write_policy)
{
    seaf_warning ("Riak backend is not enabled.\n");
    return NULL;
}

#endif  /* RIAK_BACKEND */

================================================ FILE: common/obj-backend.h ================================================
#ifndef OBJ_BACKEND_H
#define OBJ_BACKEND_H

/* NOTE(review): system header name lost in extraction. */
#include

#include "obj-store.h"

typedef struct ObjBackend ObjBackend;

/* Pluggable object-storage interface; implemented by obj-backend-fs.c. */
struct ObjBackend {
    int         (*read) (ObjBackend *bend,
                         const char *repo_id,
                         int version,
                         const char *obj_id,
                         void **data,
                         int *len);

    int         (*write) (ObjBackend *bend,
                          const char *repo_id,
                          int version,
                          const char *obj_id,
                          void *data,
                          int len,
                          gboolean need_sync);

    gboolean    (*exists) (ObjBackend *bend,
                           const char *repo_id,
                           int version,
                           const char *obj_id);

    void        (*delete) (ObjBackend *bend,
                           const char *repo_id,
                           int version,
                           const char *obj_id);

    int         (*foreach_obj) (ObjBackend *bend,
                                const char *repo_id,
                                int version,
                                SeafObjFunc process,
                                void *user_data);

    int         (*copy) (ObjBackend *bend,
                         const char *src_repo_id,
                         int src_version,
                         const char *dst_repo_id,
                         int dst_version,
                         const char *obj_id);

    int         (*remove_store) (ObjBackend *bend, const char *store_id);

    void *priv;
};

#endif

================================================ FILE: common/obj-cache.c ================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil;
   c-basic-offset: 4 -*- */

#include "common.h"

#define DEBUG_FLAG SEAFILE_DEBUG_OTHER
#include "log.h"

#include "redis-cache.h"
#include "obj-cache.h"

#define DEFAULT_MEMCACHED_EXPIRY 24 * 3600
#define DEFAULT_MAX_CONNECTIONS 100

/* Redis connection settings, filled from environment variables. */
typedef struct CacheOption {
    char *cache_provider;
    char *redis_host;
    char *redis_passwd;
    int redis_port;
    int redis_max_connections;
    int redis_expiry;
} CacheOption;

static void
cache_option_free (CacheOption *option)
{
    if (!option)
        return;
    g_free (option->cache_provider);
    g_free (option->redis_host);
    g_free (option->redis_passwd);
    g_free (option);
}

/* Overlay @option with CACHE_PROVIDER / REDIS_* environment variables.
 * Nothing is touched unless CACHE_PROVIDER is set and non-empty. */
static void
load_cache_option_from_env (CacheOption *option)
{
    const char *cache_provider, *redis_host, *redis_port, *redis_passwd,
               *redis_max_conn, *redis_expiry;

    cache_provider = g_getenv("CACHE_PROVIDER");
    redis_host = g_getenv("REDIS_HOST");
    redis_port = g_getenv("REDIS_PORT");
    redis_passwd = g_getenv("REDIS_PASSWORD");
    redis_max_conn = g_getenv("REDIS_MAX_CONNECTIONS");
    redis_expiry = g_getenv("REDIS_EXPIRY");

    if (!cache_provider || g_strcmp0 (cache_provider, "") == 0) {
        return;
    }

    if (cache_provider) {
        g_free (option->cache_provider);
        option->cache_provider = g_strdup (cache_provider);
    }
    if (redis_host && g_strcmp0(redis_host, "") != 0) {
        g_free (option->redis_host);
        option->redis_host = g_strdup (redis_host);
    }
    if (redis_port && g_strcmp0(redis_port, "") != 0) {
        option->redis_port = atoi (redis_port);
    }
    if (redis_passwd && g_strcmp0 (redis_passwd, "") != 0) {
        g_free (option->redis_passwd);
        option->redis_passwd = g_strdup (redis_passwd);
    }
    if (redis_max_conn && g_strcmp0 (redis_max_conn, "") != 0) {
        option->redis_max_connections = atoi (redis_max_conn);
    }
    if (redis_expiry && g_strcmp0 (redis_expiry, "") != 0) {
        option->redis_expiry = atoi (redis_expiry);
    }
}

/* Build an ObjCache from environment configuration; returns NULL when no
 * (or an unsupported) provider is configured.
 * NOTE(review): @config and the local @error are unused here, and the
 * obj-cache.h prototype declares objcache_new() without parameters —
 * confirm which signature is intended and reconcile. */
ObjCache *
objcache_new (GKeyFile *config)
{
    ObjCache *cache = NULL;
    GError *error = NULL;
    CacheOption *option = g_new0 (CacheOption, 1);
    int redis_port;
    int redis_expiry;
    int redis_max_connections;

    redis_expiry = DEFAULT_MEMCACHED_EXPIRY;
    redis_port = 6379;
    redis_max_connections = DEFAULT_MAX_CONNECTIONS;

    option->redis_port = redis_port;
    option->redis_max_connections = redis_max_connections;
    option->redis_expiry = redis_expiry;

    load_cache_option_from_env (option);

    if (g_strcmp0 (option->cache_provider, "redis") == 0) {
        cache = redis_cache_new (option->redis_host, option->redis_passwd,
                                 option->redis_port, option->redis_expiry,
                                 option->redis_max_connections);
    } else if (option->cache_provider){
        seaf_warning ("Unsupported cache provider: %s\n", option->cache_provider);
    }

    cache_option_free (option);
    return cache;
}

/* The functions below are thin dispatchers to the provider's vtable. */

void *
objcache_get_object (ObjCache *cache, const char *obj_id, size_t *len)
{
    return cache->get_object (cache, obj_id, len);
}

int
objcache_set_object (ObjCache *cache, const char *obj_id,
                     const void *object, int len, int expiry)
{
    return cache->set_object (cache, obj_id, object, len, expiry);
}

gboolean
objcache_test_object (ObjCache *cache, const char *obj_id)
{
    return cache->test_object (cache, obj_id);
}

int
objcache_delete_object (ObjCache *cache, const char *obj_id)
{
    return cache->delete_object (cache, obj_id);
}

/* Cache an object-existence flag as a small decimal string stored under
 * the key <existence_prefix><obj_id>. */
int
objcache_set_object_existence (ObjCache *cache, const char *obj_id,
                               int val, int expiry,
                               const char *existence_prefix)
{
    char *key;
    char buf[8];
    int n;
    int ret;

    key = g_strdup_printf ("%s%s", existence_prefix, obj_id);
    n = snprintf (buf, sizeof(buf), "%d", val);
    /* n+1 includes the NUL terminator in the cached value. */
    ret = cache->set_object (cache, key, buf, n+1, expiry);
    g_free (key);
    return ret;
}

/* Fetch a cached existence flag; returns -1 on cache miss. */
int
objcache_get_object_existence (ObjCache *cache, const char *obj_id,
                               int *val_out, const char *existence_prefix)
{
    char *key;
    size_t len;
    char *val;
    int ret = 0;

    key = g_strdup_printf ("%s%s", existence_prefix, obj_id);
    val = cache->get_object (cache, key, &len);
    if (!val)
        ret = -1;
    else
        *val_out = atoi(val);
    g_free (key);
    g_free (val);
    return ret;
}

int
objcache_delete_object_existence (ObjCache *cache, const char *obj_id,
                                  const char *existence_prefix)
{
    char *key;
    int ret;

    key = g_strdup_printf ("%s%s", existence_prefix, obj_id);
    ret = cache->delete_object (cache, key);
    g_free (key);
    return ret;
}

int
objcache_publish (ObjCache *cache, const char *channel, const char *msg)
{
    int ret;

    ret = cache->publish (cache, channel, msg);
    return ret;
}

int
objcache_push (ObjCache *cache, const char *list, const char *msg)
{
    int ret;

    ret = cache->push (cache, list, msg);
    return ret;
}

================================================ FILE: common/obj-cache.h ================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef OBJ_CACHE_H
#define OBJ_CACHE_H

#define DEFAULT_MEMCACHED_EXPIRY 24 * 3600
#define TYPE_REDIS 0x02

typedef struct ObjCache ObjCache;

/* Cache provider vtable; implemented by redis-cache.c. */
struct ObjCache {
    void* (*get_object) (ObjCache *cache, const char *obj_id, size_t *len);
    int (*set_object) (ObjCache *cache, const char *obj_id,
                       const void *object, int len, int expiry);
    gboolean (*test_object) (ObjCache *cache, const char *obj_id);
    int (*delete_object) (ObjCache *cache, const char *obj_id);
    int (*publish) (ObjCache *cache, const char *channel, const char *msg);
    int (*push) (ObjCache *cache, const char *list, const char *msg);

    int mc_expiry;
    char *host;
    int port;
    char cache_type;
    void *priv;
};

/* NOTE(review): defined in obj-cache.c as objcache_new(GKeyFile *config);
 * this parameterless declaration only compiles because of C's
 * unprototyped () rules — confirm and reconcile. */
ObjCache *
objcache_new ();

void *
objcache_get_object (struct ObjCache *cache, const char *obj_id, size_t *len);

int
objcache_set_object (struct ObjCache *cache, const char *obj_id,
                     const void *object, int len, int expiry);

gboolean
objcache_test_object (struct ObjCache *cache, const char *obj_id);

int
objcache_delete_object (struct ObjCache *cache, const char *obj_id);

int
objcache_set_object_existence (struct ObjCache *cache, const char *obj_id,
                               int val, int expiry,
                               const char *existence_prefix);

int
objcache_get_object_existence (struct ObjCache *cache, const char *obj_id,
                               int *val_out, const char *existence_prefix);

int
objcache_delete_object_existence (struct ObjCache *cache, const char *obj_id,
                                  const char *existence_prefix);

int
objcache_publish (ObjCache *cache, const char *channel, const char *msg);

int
objcache_push (ObjCache *cache, const char *list, const char *msg);

#endif

================================================ FILE: common/obj-store.c ================================================
#include "common.h"
#include "log.h"
#include "seafile-session.h"
#include "utils.h"
#include "obj-backend.h"
#include "obj-store.h"

/* Thin validating wrapper around a single ObjBackend (filesystem). */
struct SeafObjStore {
    ObjBackend *bend;
};
typedef struct SeafObjStore SeafObjStore;

extern ObjBackend *
obj_backend_fs_new (const char *seaf_dir, const char *obj_type);

struct SeafObjStore *
seaf_obj_store_new (SeafileSession *seaf, const char *obj_type)
{
    SeafObjStore *store = g_new0 (SeafObjStore, 1);

    /* NOTE(review): g_new0 aborts on OOM, so this check is dead code. */
    if (!store)
        return NULL;

    store->bend = obj_backend_fs_new (seaf->seaf_dir, obj_type);
    if (!store->bend) {
        seaf_warning ("[Object store] Failed to load backend.\n");
        g_free (store);
        return NULL;
    }

    return store;
}

int
seaf_obj_store_init (SeafObjStore *obj_store)
{
    return 0;
}

/* The accessors below reject malformed repo uuids and object ids before
 * dispatching to the backend. */

int
seaf_obj_store_read_obj (struct SeafObjStore *obj_store,
                         const char *repo_id,
                         int version,
                         const char *obj_id,
                         void **data,
                         int *len)
{
    ObjBackend *bend = obj_store->bend;

    if (!repo_id || !is_uuid_valid(repo_id) ||
        !obj_id || !is_object_id_valid(obj_id))
        return -1;

    return bend->read (bend, repo_id, version, obj_id, data, len);
}

int
seaf_obj_store_write_obj (struct SeafObjStore *obj_store,
                          const char *repo_id,
                          int version,
                          const char *obj_id,
                          void *data,
                          int len,
                          gboolean need_sync)
{
    ObjBackend *bend = obj_store->bend;

    if (!repo_id || !is_uuid_valid(repo_id) ||
        !obj_id || !is_object_id_valid(obj_id))
        return -1;

    return bend->write (bend, repo_id, version, obj_id, data, len, need_sync);
}

gboolean
seaf_obj_store_obj_exists (struct SeafObjStore *obj_store,
                           const char *repo_id,
                           int version,
                           const char *obj_id)
{
    ObjBackend *bend = obj_store->bend;

    if (!repo_id || !is_uuid_valid(repo_id) ||
        !obj_id || !is_object_id_valid(obj_id))
        return FALSE;

    return bend->exists (bend, repo_id, version, obj_id);
}

void
seaf_obj_store_delete_obj (struct SeafObjStore *obj_store,
                           const char *repo_id,
                           int version,
                           const char *obj_id)
{
    ObjBackend *bend = obj_store->bend;

    if (!repo_id || !is_uuid_valid(repo_id) ||
        !obj_id || !is_object_id_valid(obj_id))
        return;

    /* NOTE(review): 'return <void expression>' — legal C, but a plain
     * call followed by return would be clearer. */
    return bend->delete (bend, repo_id, version, obj_id);
}

int
seaf_obj_store_foreach_obj (struct SeafObjStore *obj_store,
                            const char *repo_id,
                            int version,
                            SeafObjFunc process,
                            void *user_data)
{
    ObjBackend *bend = obj_store->bend;

    return bend->foreach_obj (bend, repo_id, version, process, user_data);
}

int
seaf_obj_store_copy_obj (struct SeafObjStore *obj_store,
                         const char *src_repo_id,
                         int src_version,
                         const char *dst_repo_id,
                         int dst_version,
                         const char *obj_id)
{
    ObjBackend *bend = obj_store->bend;

    /* The empty-sha1 id is a placeholder: nothing to copy. */
    if (strcmp (obj_id, EMPTY_SHA1) == 0)
        return 0;

    return bend->copy (bend, src_repo_id, src_version,
                       dst_repo_id, dst_version, obj_id);
}

int
seaf_obj_store_remove_store (struct SeafObjStore *obj_store,
                             const char *store_id)
{
    ObjBackend *bend = obj_store->bend;

    return bend->remove_store (bend, store_id);
}

================================================ FILE: common/obj-store.h ================================================
#ifndef OBJ_STORE_H
#define OBJ_STORE_H

/* NOTE(review): system header names lost in extraction. */
#include
#include

struct _SeafileSession;
struct SeafObjStore;

struct SeafObjStore *
seaf_obj_store_new (struct _SeafileSession *seaf, const char *obj_type);

int
seaf_obj_store_init (struct SeafObjStore *obj_store);

/* Synchronous I/O interface.
 */
int
seaf_obj_store_read_obj (struct SeafObjStore *obj_store,
                         const char *repo_id,
                         int version,
                         const char *obj_id,
                         void **data,
                         int *len);

int
seaf_obj_store_write_obj (struct SeafObjStore *obj_store,
                          const char *repo_id,
                          int version,
                          const char *obj_id,
                          void *data,
                          int len,
                          gboolean need_sync);

gboolean
seaf_obj_store_obj_exists (struct SeafObjStore *obj_store,
                           const char *repo_id,
                           int version,
                           const char *obj_id);

void
seaf_obj_store_delete_obj (struct SeafObjStore *obj_store,
                           const char *repo_id,
                           int version,
                           const char *obj_id);

/* Traversal callback: return FALSE to stop the iteration. */
typedef gboolean (*SeafObjFunc) (const char *repo_id,
                                 int version,
                                 const char *obj_id,
                                 void *user_data);

int
seaf_obj_store_foreach_obj (struct SeafObjStore *obj_store,
                            const char *repo_id,
                            int version,
                            SeafObjFunc process,
                            void *user_data);

int
seaf_obj_store_copy_obj (struct SeafObjStore *obj_store,
                         const char *src_store_id,
                         int src_version,
                         const char *dst_store_id,
                         int dst_version,
                         const char *obj_id);

int
seaf_obj_store_remove_store (struct SeafObjStore *obj_store,
                             const char *store_id);

#endif

================================================ FILE: common/object-list.c ================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

#include "common.h"
#include "object-list.h"

/* Insertion-ordered, de-duplicated collection of object-id strings. */
ObjectList *
object_list_new ()
{
    ObjectList *ol = g_new0 (ObjectList, 1);

    /* The hash table has no destroy funcs: the id strings are owned by
     * the obj_ids pointer array (free func g_free). */
    ol->obj_hash = g_hash_table_new_full (g_str_hash, g_str_equal, NULL, NULL);
    ol->obj_ids = g_ptr_array_new_with_free_func (g_free);

    return ol;
}

void
object_list_free (ObjectList *ol)
{
    if (ol->obj_hash)
        g_hash_table_destroy (ol->obj_hash);
    g_ptr_array_free (ol->obj_ids, TRUE);
    g_free (ol);
}

/* Serialize into a packed array of 41-byte (id + NUL) records.
 * Caller frees *buffer with g_free. */
void
object_list_serialize (ObjectList *ol, uint8_t **buffer, uint32_t *len)
{
    uint32_t i;
    uint32_t offset = 0;
    uint8_t *buf;

    int ollen = object_list_length(ol);
    buf = g_new (uint8_t, 41 * ollen);
    for (i = 0; i < ollen; ++i) {
        memcpy (&buf[offset], g_ptr_array_index(ol->obj_ids, i), 41);
        offset += 41;
    }

    *buffer = buf;
    *len = 41 *
ollen; } gboolean object_list_insert (ObjectList *ol, const char *object_id) { if (g_hash_table_lookup (ol->obj_hash, object_id)) return FALSE; char *id = g_strdup(object_id); g_hash_table_replace (ol->obj_hash, id, id); g_ptr_array_add (ol->obj_ids, id); return TRUE; } ================================================ FILE: common/object-list.h ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef OBJECT_LIST_H #define OBJECT_LIST_H #include typedef struct { GHashTable *obj_hash; GPtrArray *obj_ids; } ObjectList; ObjectList * object_list_new (); void object_list_free (ObjectList *ol); void object_list_serialize (ObjectList *ol, uint8_t **buffer, uint32_t *len); /** * Add object to ObjectList. * Return FALSE if it is already in the list, TRUE otherwise. */ gboolean object_list_insert (ObjectList *ol, const char *object_id); inline static gboolean object_list_exists (ObjectList *ol, const char *object_id) { return (g_hash_table_lookup(ol->obj_hash, object_id) != NULL); } inline static int object_list_length (ObjectList *ol) { return ol->obj_ids->len; } #endif ================================================ FILE: common/org-mgr.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include "common.h" #include "seafile-session.h" #include "seaf-db.h" #include "org-mgr.h" #include "seaf-utils.h" #include "utils.h" #include "log.h" #define DEFAULT_MAX_CONNECTIONS 100 struct _CcnetOrgManagerPriv { CcnetDB *db; }; static int open_db (CcnetOrgManager *manager); static int check_db_table (CcnetDB *db); CcnetOrgManager* ccnet_org_manager_new (SeafileSession *session) { CcnetOrgManager *manager = g_new0 (CcnetOrgManager, 1); manager->session = session; manager->priv = g_new0 (CcnetOrgManagerPriv, 1); return manager; } int ccnet_org_manager_init (CcnetOrgManager *manager) { return 0; } int ccnet_org_manager_prepare 
(CcnetOrgManager *manager) { return open_db (manager); } static CcnetDB * open_sqlite_db (CcnetOrgManager *manager) { CcnetDB *db = NULL; char *db_dir; char *db_path; db_dir = g_build_filename (manager->session->ccnet_dir, "OrgMgr", NULL); if (checkdir_with_mkdir(db_dir) < 0) { ccnet_error ("Cannot open db dir %s: %s\n", db_dir, strerror(errno)); g_free (db_dir); return NULL; } g_free (db_dir); db_path = g_build_filename (manager->session->ccnet_dir, "OrgMgr", "orgmgr.db", NULL); db = seaf_db_new_sqlite (db_path, DEFAULT_MAX_CONNECTIONS); g_free (db_path); return db; } static int open_db (CcnetOrgManager *manager) { CcnetDB *db = NULL; switch (seaf_db_type(manager->session->ccnet_db)) { case SEAF_DB_TYPE_SQLITE: db = open_sqlite_db (manager); break; case SEAF_DB_TYPE_PGSQL: case SEAF_DB_TYPE_MYSQL: db = manager->session->ccnet_db; break; } if (!db) return -1; manager->priv->db = db; if ((manager->session->create_tables || seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) && check_db_table (db) < 0) { ccnet_warning ("Failed to create org db tables.\n"); return -1; } return 0; } void ccnet_org_manager_start (CcnetOrgManager *manager) { } /* -------- Group Database Management ---------------- */ static int check_db_table (CcnetDB *db) { char *sql; int db_type = seaf_db_type (db); if (db_type == SEAF_DB_TYPE_MYSQL) { sql = "CREATE TABLE IF NOT EXISTS Organization (org_id BIGINT" " PRIMARY KEY AUTO_INCREMENT, org_name VARCHAR(255)," " url_prefix VARCHAR(255), creator VARCHAR(255), ctime BIGINT," " UNIQUE INDEX (url_prefix))" "ENGINE=INNODB"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS OrgUser ( " "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, org_id INTEGER, " "email VARCHAR(255), is_staff BOOL NOT NULL, " "INDEX (email), UNIQUE INDEX(org_id, email))" "ENGINE=INNODB"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS OrgGroup (" "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, org_id INTEGER, " "group_id INTEGER, INDEX 
(group_id), " "UNIQUE INDEX(org_id, group_id))" "ENGINE=INNODB"; if (seaf_db_query (db, sql) < 0) return -1; } else if (db_type == SEAF_DB_TYPE_SQLITE) { sql = "CREATE TABLE IF NOT EXISTS Organization (org_id INTEGER" " PRIMARY KEY AUTOINCREMENT, org_name VARCHAR(255)," " url_prefix VARCHAR(255), " " creator VARCHAR(255), ctime BIGINT)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE UNIQUE INDEX IF NOT EXISTS url_prefix_indx on " "Organization (url_prefix)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS OrgUser (org_id INTEGER, " "email TEXT, is_staff bool NOT NULL)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE INDEX IF NOT EXISTS email_indx on " "OrgUser (email)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE UNIQUE INDEX IF NOT EXISTS orgid_email_indx on " "OrgUser (org_id, email)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS OrgGroup (org_id INTEGER, " "group_id INTEGER)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE INDEX IF NOT EXISTS groupid_indx on OrgGroup (group_id)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE UNIQUE INDEX IF NOT EXISTS org_group_indx on " "OrgGroup (org_id, group_id)"; if (seaf_db_query (db, sql) < 0) return -1; } else if (db_type == SEAF_DB_TYPE_PGSQL) { sql = "CREATE TABLE IF NOT EXISTS Organization (org_id SERIAL" " PRIMARY KEY, org_name VARCHAR(255)," " url_prefix VARCHAR(255), creator VARCHAR(255), ctime BIGINT," " UNIQUE (url_prefix))"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS OrgUser (org_id INTEGER, " "email VARCHAR(255), is_staff INTEGER NOT NULL, " "UNIQUE (org_id, email))"; if (seaf_db_query (db, sql) < 0) return -1; //if (!pgsql_index_exists (db, "orguser_email_idx")) { // sql = "CREATE INDEX orguser_email_idx ON OrgUser (email)"; // if (seaf_db_query (db, sql) < 0) // return -1; //} sql = "CREATE TABLE IF NOT EXISTS OrgGroup (org_id INTEGER, " "group_id 
INTEGER, " "UNIQUE (org_id, group_id))"; if (seaf_db_query (db, sql) < 0) return -1; //if (!pgsql_index_exists (db, "orggroup_groupid_idx")) { // sql = "CREATE INDEX orggroup_groupid_idx ON OrgGroup (group_id)"; // if (seaf_db_query (db, sql) < 0) // return -1; //} } return 0; } int ccnet_org_manager_create_org (CcnetOrgManager *mgr, const char *org_name, const char *url_prefix, const char *creator, GError **error) { CcnetDB *db = mgr->priv->db; gint64 now = get_current_time(); int rc; rc = seaf_db_statement_query (db, "INSERT INTO Organization(org_name, url_prefix," " creator, ctime) VALUES (?, ?, ?, ?)", 4, "string", org_name, "string", url_prefix, "string", creator, "int64", now); if (rc < 0) { g_set_error (error, CCNET_DOMAIN, 0, "Failed to create organization"); return -1; } int org_id = seaf_db_statement_get_int (db, "SELECT org_id FROM Organization WHERE " "url_prefix = ?", 1, "string", url_prefix); if (org_id < 0) { g_set_error (error, CCNET_DOMAIN, 0, "Failed to create organization"); return -1; } rc = seaf_db_statement_query (db, "INSERT INTO OrgUser (org_id, email, is_staff) values (?, ?, ?)", 3, "int", org_id, "string", creator, "int", 1); if (rc < 0) { seaf_db_statement_query (db, "DELETE FROM Organization WHERE org_id=?", 1, "int", org_id); g_set_error (error, CCNET_DOMAIN, 0, "Failed to create organization"); return -1; } return org_id; } int ccnet_org_manager_remove_org (CcnetOrgManager *mgr, int org_id, GError **error) { CcnetDB *db = mgr->priv->db; seaf_db_statement_query (db, "DELETE FROM Organization WHERE org_id = ?", 1, "int", org_id); seaf_db_statement_query (db, "DELETE FROM OrgUser WHERE org_id = ?", 1, "int", org_id); seaf_db_statement_query (db, "DELETE FROM OrgGroup WHERE org_id = ?", 1, "int", org_id); return 0; } static gboolean get_all_orgs_cb (CcnetDBRow *row, void *data) { GList **p_list = data; CcnetOrganization *org = NULL; int org_id; const char *org_name; const char *url_prefix; const char *creator; gint64 ctime; org_id = 
seaf_db_row_get_column_int (row, 0); org_name = seaf_db_row_get_column_text (row, 1); url_prefix = seaf_db_row_get_column_text (row, 2); creator = seaf_db_row_get_column_text (row, 3); ctime = seaf_db_row_get_column_int64 (row, 4); org = g_object_new (CCNET_TYPE_ORGANIZATION, "org_id", org_id, "org_name", org_name, "url_prefix", url_prefix, "creator", creator, "ctime", ctime, NULL); *p_list = g_list_prepend (*p_list, org); return TRUE; } GList * ccnet_org_manager_get_all_orgs (CcnetOrgManager *mgr, int start, int limit) { CcnetDB *db = mgr->priv->db; char *sql; GList *ret = NULL; int rc; if (start == -1 && limit == -1) { sql = "SELECT * FROM Organization ORDER BY org_id"; rc = seaf_db_statement_foreach_row (db, sql, get_all_orgs_cb, &ret, 0); } else { sql = "SELECT * FROM Organization ORDER BY org_id LIMIT ? OFFSET ?"; rc = seaf_db_statement_foreach_row (db, sql, get_all_orgs_cb, &ret, 2, "int", limit, "int", start); } if (rc < 0) return NULL; return g_list_reverse (ret); } int ccnet_org_manager_count_orgs (CcnetOrgManager *mgr) { CcnetDB *db = mgr->priv->db; char *sql; gint64 ret; sql = "SELECT count(*) FROM Organization"; ret = seaf_db_get_int64 (db, sql); if (ret < 0) return -1; return ret; } static gboolean get_org_cb (CcnetDBRow *row, void *data) { CcnetOrganization **p_org = data; int org_id; const char *org_name; const char *url_prefix; const char *creator; gint64 ctime; org_id = seaf_db_row_get_column_int (row, 0); org_name = seaf_db_row_get_column_text (row, 1); url_prefix = seaf_db_row_get_column_text (row, 2); creator = seaf_db_row_get_column_text (row, 3); ctime = seaf_db_row_get_column_int64 (row, 4); *p_org = g_object_new (CCNET_TYPE_ORGANIZATION, "org_id", org_id, "org_name", org_name, "url_prefix", url_prefix, "creator", creator, "ctime", ctime, NULL); return FALSE; } CcnetOrganization * ccnet_org_manager_get_org_by_url_prefix (CcnetOrgManager *mgr, const char *url_prefix, GError **error) { CcnetDB *db = mgr->priv->db; char *sql; CcnetOrganization 
*org = NULL; sql = "SELECT org_id, org_name, url_prefix, creator," " ctime FROM Organization WHERE url_prefix = ?"; if (seaf_db_statement_foreach_row (db, sql, get_org_cb, &org, 1, "string", url_prefix) < 0) { return NULL; } return org; } CcnetOrganization * ccnet_org_manager_get_org_by_id (CcnetOrgManager *mgr, int org_id, GError **error) { CcnetDB *db = mgr->priv->db; char *sql; CcnetOrganization *org = NULL; sql = "SELECT org_id, org_name, url_prefix, creator," " ctime FROM Organization WHERE org_id = ?"; if (seaf_db_statement_foreach_row (db, sql, get_org_cb, &org, 1, "int", org_id) < 0) { return NULL; } return org; } int ccnet_org_manager_add_org_user (CcnetOrgManager *mgr, int org_id, const char *email, int is_staff, GError **error) { CcnetDB *db = mgr->priv->db; return seaf_db_statement_query (db, "INSERT INTO OrgUser (org_id, email, is_staff) values (?, ?, ?)", 3, "int", org_id, "string", email, "int", is_staff); } int ccnet_org_manager_remove_org_user (CcnetOrgManager *mgr, int org_id, const char *email, GError **error) { CcnetDB *db = mgr->priv->db; return seaf_db_statement_query (db, "DELETE FROM OrgUser WHERE org_id=? 
AND " "email=?", 2, "int", org_id, "string", email); } static gboolean get_orgs_by_user_cb (CcnetDBRow *row, void *data) { GList **p_list = (GList **)data; CcnetOrganization *org = NULL; int org_id; const char *email; int is_staff; const char *org_name; const char *url_prefix; const char *creator; gint64 ctime; org_id = seaf_db_row_get_column_int (row, 0); email = (char *) seaf_db_row_get_column_text (row, 1); is_staff = seaf_db_row_get_column_int (row, 2); org_name = (char *) seaf_db_row_get_column_text (row, 3); url_prefix = (char *) seaf_db_row_get_column_text (row, 4); creator = (char *) seaf_db_row_get_column_text (row, 5); ctime = seaf_db_row_get_column_int64 (row, 6); org = g_object_new (CCNET_TYPE_ORGANIZATION, "org_id", org_id, "email", email, "is_staff", is_staff, "org_name", org_name, "url_prefix", url_prefix, "creator", creator, "ctime", ctime, NULL); *p_list = g_list_prepend (*p_list, org); return TRUE; } GList * ccnet_org_manager_get_orgs_by_user (CcnetOrgManager *mgr, const char *email, GError **error) { CcnetDB *db = mgr->priv->db; char *sql; GList *ret = NULL; sql = "SELECT t1.org_id, email, is_staff, org_name," " url_prefix, creator, ctime FROM OrgUser t1, Organization t2" " WHERE t1.org_id = t2.org_id AND email = ?"; if (seaf_db_statement_foreach_row (db, sql, get_orgs_by_user_cb, &ret, 1, "string", email) < 0) { g_list_free (ret); return NULL; } return g_list_reverse (ret); } static gboolean get_org_emailusers (CcnetDBRow *row, void *data) { GList **list = (GList **)data; const char *email = (char *) seaf_db_row_get_column_text (row, 0); *list = g_list_prepend (*list, g_strdup (email)); return TRUE; } GList * ccnet_org_manager_get_org_emailusers (CcnetOrgManager *mgr, const char *url_prefix, int start, int limit) { CcnetDB *db = mgr->priv->db; char *sql; GList *ret = NULL; int rc; if (start == -1 && limit == -1) { sql = "SELECT u.email FROM OrgUser u, Organization o " "WHERE u.org_id = o.org_id AND " "o.url_prefix = ? 
" "ORDER BY email"; rc = seaf_db_statement_foreach_row (db, sql, get_org_emailusers, &ret, 1, "string", url_prefix); } else { sql = "SELECT u.email FROM OrgUser u, Organization o " "WHERE u.org_id = o.org_id AND " "o.url_prefix = ? " " ORDER BY email LIMIT ? OFFSET ?"; rc = seaf_db_statement_foreach_row (db, sql, get_org_emailusers, &ret, 3, "string", url_prefix, "int", limit, "int", start); } if (rc < 0) return NULL; return g_list_reverse (ret); } int ccnet_org_manager_add_org_group (CcnetOrgManager *mgr, int org_id, int group_id, GError **error) { CcnetDB *db = mgr->priv->db; return seaf_db_statement_query (db, "INSERT INTO OrgGroup (org_id, group_id) VALUES (?, ?)", 2, "int", org_id, "int", group_id); } int ccnet_org_manager_remove_org_group (CcnetOrgManager *mgr, int org_id, int group_id, GError **error) { CcnetDB *db = mgr->priv->db; return seaf_db_statement_query (db, "DELETE FROM OrgGroup WHERE org_id=?" " AND group_id=?", 2, "int", org_id, "string", group_id); } int ccnet_org_manager_is_org_group (CcnetOrgManager *mgr, int group_id, GError **error) { gboolean exists, err; CcnetDB *db = mgr->priv->db; exists = seaf_db_statement_exists (db, "SELECT group_id FROM OrgGroup " "WHERE group_id = ?", &err, 1, "int", group_id); if (err) { ccnet_warning ("DB error when check group exist in OrgGroup.\n"); return 0; } return exists; } int ccnet_org_manager_get_org_id_by_group (CcnetOrgManager *mgr, int group_id, GError **error) { CcnetDB *db = mgr->priv->db; char *sql; sql = "SELECT org_id FROM OrgGroup WHERE group_id = ?"; return seaf_db_statement_get_int (db, sql, 1, "int", group_id); } static gboolean get_org_group_ids (CcnetDBRow *row, void *data) { GList **plist = data; int group_id = seaf_db_row_get_column_int (row, 0); *plist = g_list_prepend (*plist, (gpointer)(long)group_id); return TRUE; } GList * ccnet_org_manager_get_org_group_ids (CcnetOrgManager *mgr, int org_id, int start, int limit) { CcnetDB *db = mgr->priv->db; GList *ret = NULL; int rc; if (limit == 
-1) { rc = seaf_db_statement_foreach_row (db, "SELECT group_id FROM OrgGroup WHERE " "org_id = ?", get_org_group_ids, &ret, 1, "int", org_id); } else { rc = seaf_db_statement_foreach_row (db, "SELECT group_id FROM OrgGroup WHERE " "org_id = ? LIMIT ? OFFSET ?", get_org_group_ids, &ret, 3, "int", org_id, "int", limit, "int", start); } if (rc < 0) { g_list_free (ret); return NULL; } return g_list_reverse (ret); } static gboolean get_org_groups (CcnetDBRow *row, void *data) { GList **plist = data; CcnetGroup *group; int group_id = seaf_db_row_get_column_int (row, 0); const char *group_name = seaf_db_row_get_column_text (row, 1); const char *creator_name = seaf_db_row_get_column_text (row, 2); gint64 ts = seaf_db_row_get_column_int64 (row, 3); int parent_group_id = seaf_db_row_get_column_int (row, 4); group = g_object_new (CCNET_TYPE_GROUP, "id", group_id, "group_name", group_name, "creator_name", creator_name, "timestamp", ts, "source", "DB", "parent_group_id", parent_group_id, NULL); *plist = g_list_prepend (*plist, group); return TRUE; } GList * ccnet_org_manager_get_org_top_groups (CcnetOrgManager *mgr, int org_id, GError **error) { CcnetDB *db = mgr->priv->db; GList *ret = NULL; char *sql; int rc; sql = "SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM " "`OrgGroup` o, `Group` g WHERE o.group_id = g.group_id AND " "org_id=? 
AND parent_group_id=-1 ORDER BY timestamp DESC"; rc = seaf_db_statement_foreach_row (db, sql, get_org_groups, &ret, 1, "int", org_id); if (rc < 0) return NULL; return g_list_reverse (ret); } GList * ccnet_org_manager_get_org_groups (CcnetOrgManager *mgr, int org_id, int start, int limit) { CcnetDB *db = mgr->priv->db; char *sql; GList *ret = NULL; int rc; if (limit == -1) { sql = "SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM " "OrgGroup o, `Group` g WHERE o.group_id = g.group_id AND org_id = ?"; rc = seaf_db_statement_foreach_row (db, sql, get_org_groups, &ret, 1, "int", org_id); } else { sql = "SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM " "OrgGroup o, `Group` g WHERE o.group_id = g.group_id AND org_id = ? " "LIMIT ? OFFSET ?"; rc = seaf_db_statement_foreach_row (db, sql, get_org_groups, &ret, 3, "int", org_id, "int", limit, "int", start); } if (rc < 0) { return NULL; } return g_list_reverse (ret); } GList * ccnet_org_manager_get_org_groups_by_user (CcnetOrgManager *mgr, const char *user, int org_id) { CcnetDB *db = mgr->priv->db; char *sql; GList *ret = NULL; int rc; sql = "SELECT g.group_id, group_name, creator_name, timestamp FROM " "OrgGroup o, `Group` g, GroupUser u " "WHERE o.group_id = g.group_id AND org_id = ? AND " "g.group_id = u.group_id AND user_name = ?"; rc = seaf_db_statement_foreach_row (db, sql, get_org_groups, &ret, 2, "int", org_id, "string", user); if (rc < 0) return NULL; return g_list_reverse (ret); } int ccnet_org_manager_org_user_exists (CcnetOrgManager *mgr, int org_id, const char *email, GError **error) { gboolean exists, err; CcnetDB *db = mgr->priv->db; exists = seaf_db_statement_exists (db, "SELECT org_id FROM OrgUser WHERE " "org_id = ? 
AND email = ?", &err, 2, "int", org_id, "string", email); if (err) { ccnet_warning ("DB error when check user exist in OrgUser.\n"); return 0; } return exists; } char * ccnet_org_manager_get_url_prefix_by_org_id (CcnetOrgManager *mgr, int org_id, GError **error) { CcnetDB *db = mgr->priv->db; char *sql; sql = "SELECT url_prefix FROM Organization WHERE org_id = ?"; return seaf_db_statement_get_string (db, sql, 1, "int", org_id); } int ccnet_org_manager_is_org_staff (CcnetOrgManager *mgr, int org_id, const char *email, GError **error) { CcnetDB *db = mgr->priv->db; char *sql; sql = "SELECT is_staff FROM OrgUser WHERE org_id=? AND email=?"; return seaf_db_statement_get_int (db, sql, 2, "int", org_id, "string", email); } int ccnet_org_manager_set_org_staff (CcnetOrgManager *mgr, int org_id, const char *email, GError **error) { CcnetDB *db = mgr->priv->db; return seaf_db_statement_query (db, "UPDATE OrgUser SET is_staff = 1 " "WHERE org_id=? AND email=?", 2, "int", org_id, "string", email); } int ccnet_org_manager_unset_org_staff (CcnetOrgManager *mgr, int org_id, const char *email, GError **error) { CcnetDB *db = mgr->priv->db; return seaf_db_statement_query (db, "UPDATE OrgUser SET is_staff = 0 " "WHERE org_id=? AND email=?", 2, "int", org_id, "string", email); } int ccnet_org_manager_set_org_name(CcnetOrgManager *mgr, int org_id, const char *org_name, GError **error) { CcnetDB *db = mgr->priv->db; return seaf_db_statement_query (db, "UPDATE `Organization` set org_name = ? 
" "WHERE org_id = ?", 2, "string", org_name, "int", org_id); return 0; } ================================================ FILE: common/org-mgr.h ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef _ORG_MGR_H_ #define _ORG_MGR_H_ typedef struct _SeafileSession SeafileSession; typedef struct _CcnetOrgManager CcnetOrgManager; typedef struct _CcnetOrgManagerPriv CcnetOrgManagerPriv; struct _CcnetOrgManager { SeafileSession *session; CcnetOrgManagerPriv *priv; }; CcnetOrgManager* ccnet_org_manager_new (SeafileSession *session); int ccnet_org_manager_prepare (CcnetOrgManager *manager); void ccnet_org_manager_start (CcnetOrgManager *manager); int ccnet_org_manager_create_org (CcnetOrgManager *mgr, const char *org_name, const char *url_prefix, const char *creator, GError **error); int ccnet_org_manager_remove_org (CcnetOrgManager *mgr, int org_id, GError **error); GList * ccnet_org_manager_get_all_orgs (CcnetOrgManager *mgr, int start, int limit); int ccnet_org_manager_count_orgs (CcnetOrgManager *mgr); CcnetOrganization * ccnet_org_manager_get_org_by_url_prefix (CcnetOrgManager *mgr, const char *url_prefix, GError **error); CcnetOrganization * ccnet_org_manager_get_org_by_id (CcnetOrgManager *mgr, int org_id, GError **error); int ccnet_org_manager_add_org_user (CcnetOrgManager *mgr, int org_id, const char *email, int is_staff, GError **error); int ccnet_org_manager_remove_org_user (CcnetOrgManager *mgr, int org_id, const char *email, GError **error); GList * ccnet_org_manager_get_orgs_by_user (CcnetOrgManager *mgr, const char *email, GError **error); GList * ccnet_org_manager_get_org_emailusers (CcnetOrgManager *mgr, const char *url_prefix, int start, int limit); int ccnet_org_manager_add_org_group (CcnetOrgManager *mgr, int org_id, int group_id, GError **error); int ccnet_org_manager_remove_org_group (CcnetOrgManager *mgr, int org_id, int group_id, GError **error); int 
ccnet_org_manager_is_org_group (CcnetOrgManager *mgr, int group_id, GError **error); int ccnet_org_manager_get_org_id_by_group (CcnetOrgManager *mgr, int group_id, GError **error); GList * ccnet_org_manager_get_org_group_ids (CcnetOrgManager *mgr, int org_id, int start, int limit); GList * ccnet_org_manager_get_org_groups (CcnetOrgManager *mgr, int org_id, int start, int limit); GList * ccnet_org_manager_get_org_groups_by_user (CcnetOrgManager *mgr, const char *user, int org_id); GList * ccnet_org_manager_get_org_top_groups (CcnetOrgManager *mgr, int org_id, GError **error); int ccnet_org_manager_org_user_exists (CcnetOrgManager *mgr, int org_id, const char *email, GError **error); char * ccnet_org_manager_get_url_prefix_by_org_id (CcnetOrgManager *mgr, int org_id, GError **error); int ccnet_org_manager_is_org_staff (CcnetOrgManager *mgr, int org_id, const char *email, GError **error); int ccnet_org_manager_set_org_staff (CcnetOrgManager *mgr, int org_id, const char *email, GError **error); int ccnet_org_manager_unset_org_staff (CcnetOrgManager *mgr, int org_id, const char *email, GError **error); int ccnet_org_manager_set_org_name(CcnetOrgManager *mgr, int org_id, const char *org_name, GError **error); #endif /* _ORG_MGR_H_ */ ================================================ FILE: common/password-hash.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include #include #include #include "password-hash.h" #include "seafile-crypt.h" #include #include "utils.h" #include "log.h" // pbkdf2 typedef struct Pbkdf2Params { int iteration; } Pbkdf2Params; static Pbkdf2Params * parse_pbkdf2_sha256_params (const char *params_str) { Pbkdf2Params *params = NULL; if (!params_str) { params = g_new0 (Pbkdf2Params, 1); params->iteration = 1000; return params; } int iteration; iteration = atoi (params_str); if (iteration <= 0) { iteration = 1000; } params = g_new0 (Pbkdf2Params, 1); params->iteration = 
iteration; return params; }

/* Derive a 32-byte key from data_in with PBKDF2-HMAC-SHA256.
 * salt is a hex string decoded into 32 raw bytes; the iteration count
 * comes from params. Always returns 0. */
static int
pbkdf2_sha256_derive_key (const char *data_in, int in_len,
                          const char *salt,
                          Pbkdf2Params *params,
                          unsigned char *key)
{
    int iteration = params->iteration;
    unsigned char salt_bin[32] = {0};

    /* salt is hex-encoded; convert to the raw bytes PBKDF2 expects. */
    hex_to_rawdata (salt, salt_bin, 32);

    PKCS5_PBKDF2_HMAC (data_in, in_len,
                       salt_bin, sizeof(salt_bin),
                       iteration,
                       EVP_sha256(),
                       32, key);
    return 0;
}

// argon2id
typedef struct Argon2idParams{
    gint64 time_cost;
    gint64 memory_cost;
    gint64 parallelism;
} Argon2idParams;

// The arguments to argon2 are separated by commas.
// Example arguments format:
// 2,102400,8
// The parameters are time_cost, memory_cost, parallelism from left to right.
// Missing or non-positive fields fall back to the defaults below.
static Argon2idParams *
parse_argon2id_params (const char *params_str)
{
    char **params;
    Argon2idParams *argon2_params = g_new0 (Argon2idParams, 1);

    /* NOTE: "params" is only assigned when params_str is non-NULL; the
     * short-circuit in the condition below keeps the NULL case from
     * reading it uninitialized. */
    if (params_str)
        params = g_strsplit (params_str, ",", 3);
    if (!params_str || g_strv_length(params) != 3) {
        if (params_str)
            g_strfreev (params);
        argon2_params->time_cost = 2; // 2-pass computation
        argon2_params->memory_cost = 102400; // 100 mebibytes memory usage
        argon2_params->parallelism = 8; // number of threads and lanes
        return argon2_params;
    }

    char *p = NULL;

    p = g_strstrip (params[0]);
    argon2_params->time_cost = atoll (p);
    if (argon2_params->time_cost <= 0) {
        argon2_params->time_cost = 2;
    }

    p = g_strstrip (params[1]);
    argon2_params->memory_cost = atoll (p);
    if (argon2_params->memory_cost <= 0) {
        argon2_params->memory_cost = 102400;
    }

    p = g_strstrip (params[2]);
    argon2_params->parallelism = atoll (p);
    if (argon2_params->parallelism <= 0) {
        argon2_params->parallelism = 8;
    }

    g_strfreev (params);
    return argon2_params;
}

/* Derive a 32-byte key from data_in with Argon2id, using the cost
 * parameters in params. salt is a hex string decoded into 32 raw
 * bytes. Always returns 0. */
static int
argon2id_derive_key (const char *data_in, int in_len,
                     const char *salt,
                     Argon2idParams *params,
                     unsigned char *key)
{
    unsigned char salt_bin[32] = {0};

    hex_to_rawdata (salt, salt_bin, 32);

    argon2id_hash_raw(params->time_cost, params->memory_cost,
                      params->parallelism, data_in, in_len,
                      salt_bin, sizeof(salt_bin), key, 32);
    return 0;
}

// 
parse_pwd_hash_params is used to parse default pwd hash algorithms. void parse_pwd_hash_params (const char *algo, const char *params_str, PwdHashParams *params) { if (g_strcmp0 (algo, PWD_HASH_PDKDF2) == 0) { params->algo = g_strdup (PWD_HASH_PDKDF2); if (params_str) params->params_str = g_strdup (params_str); else params->params_str = g_strdup ("1000"); } else if (g_strcmp0 (algo, PWD_HASH_ARGON2ID) == 0) { params->algo = g_strdup (PWD_HASH_ARGON2ID); if (params_str) params->params_str = g_strdup (params_str); else params->params_str = g_strdup ("2,102400,8"); } else { params->algo = NULL; } seaf_message ("password hash algorithms: %s, params: %s\n ", params->algo, params->params_str); } int pwd_hash_derive_key (const char *data_in, int in_len, const char *salt, const char *algo, const char *params_str, unsigned char *key) { int ret = 0; if (g_strcmp0 (algo, PWD_HASH_ARGON2ID) == 0) { Argon2idParams *algo_params = parse_argon2id_params (params_str); ret = argon2id_derive_key (data_in, in_len, salt, algo_params, key); g_free (algo_params); return ret; } else { Pbkdf2Params *algo_params = parse_pbkdf2_sha256_params (params_str); ret = pbkdf2_sha256_derive_key (data_in, in_len, salt, algo_params, key); g_free (algo_params); return ret; } } ================================================ FILE: common/password-hash.h ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef _PASSWORD_HASH_H #define _PASSWORD_HASH_H #define PWD_HASH_PDKDF2 "pbkdf2_sha256" #define PWD_HASH_ARGON2ID "argon2id" typedef struct _PwdHashParams { char *algo; char *params_str; } PwdHashParams; void parse_pwd_hash_params (const char *algo, const char *params_str, PwdHashParams *params); int pwd_hash_derive_key (const char *data_in, int in_len, const char *repo_salt, const char *algo, const char *params_str, unsigned char *key); #endif ================================================ FILE: 
common/processors/objecttx-common.h ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef OBJECTTX_COMMON_H #define OBJECTTX_COMMON_H #define SC_GET_OBJECT "301" #define SS_GET_OBJECT "Get Object" #define SC_OBJECT "302" #define SS_OBJECT "Object" #define SC_END "303" #define SS_END "END" #define SC_COMMIT_IDS "304" #define SS_COMMIT_IDS "Commit IDs" #define SC_ACK "305" #define SS_ACK "Ack" #define SC_OBJ_SEG "306" #define SS_OBJ_SEG "Object Segment" #define SC_OBJ_SEG_END "307" #define SS_OBJ_SEG_END "Object Segment End" #define SC_OBJ_LIST_SEG "308" #define SS_OBJ_LIST_SEG "Object List Segment" #define SC_OBJ_LIST_SEG_END "309" #define SS_OBJ_LIST_SEG_END "Object List Segment End" #define SC_NOT_FOUND "401" #define SS_NOT_FOUND "Object not found" #define SC_BAD_OL "402" #define SS_BAD_OL "Bad Object List" #define SC_BAD_OBJECT "403" #define SS_BAD_OBJECT "Bad Object" #define SC_ACCESS_DENIED "410" #define SS_ACCESS_DENIED "Access denied" /* for fs transfer */ #define SC_ROOT "304" #define SS_ROOT "FS Root" #define SC_ROOT_END "305" #define SS_ROOT_END "FS Root End" /* max fs object segment size */ #define MAX_OBJ_SEG_SIZE 64000 typedef struct { char id[41]; uint8_t object[0]; } __attribute__((__packed__)) ObjectPack; #endif ================================================ FILE: common/redis-cache.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include "common.h" #include #include "redis-cache.h" #define DEBUG_FLAG SEAFILE_DEBUG_OTHER #include "log.h" struct _RedisConnectionPool { char *host; int port; GPtrArray *connections; pthread_mutex_t lock; int max_connections; }; typedef struct _RedisConnectionPool RedisConnectionPool; struct _RedisConnection { gboolean is_available; redisContext *ac; gint64 ctime; /* Used to clean up unused connection. 
*/ gboolean release; /* If TRUE, the connection will be released. */ }; typedef struct _RedisConnection RedisConnection; typedef struct RedisPriv { RedisConnectionPool *redis_pool; char *passwd; } RedisPriv; static int redis_auth (RedisConnection *conn, const char *passwd) { redisReply *reply; int ret = 0; if (!passwd) { return 0; } reply = redisCommand(conn->ac, "AUTH %s", passwd); if (!reply) { seaf_warning ("Failed to auth redis server.\n"); ret = -1; goto out; } if (reply->type != REDIS_REPLY_STATUS || g_strcmp0 (reply->str, "OK") != 0) { if (reply->type == REDIS_REPLY_ERROR) { seaf_warning ("Failed to auth redis: %s.\n", reply->str); } ret = -1; goto out; } out: freeReplyObject (reply); return ret; } static RedisConnection * redis_connection_new (const char *host, const char *passwd, int port) { RedisConnection *conn = g_new0 (RedisConnection, 1); conn->ac = redisConnect(host, port); if (!conn->ac || conn->ac->err) { if (conn->ac) { seaf_warning ("Failed to connect to redis : %s\n", conn->ac->errstr); redisFree (conn->ac); } else { seaf_warning ("Can't allocate redis context\n"); } g_free (conn); return NULL; } if (redis_auth (conn, passwd) < 0) { redisFree (conn->ac); g_free (conn); return NULL; } conn->ctime = (gint64)time(NULL); return conn; } static void redis_connection_free (RedisConnection *conn) { if (!conn) return; if (conn->ac) redisFree(conn->ac); g_free (conn); } static RedisConnectionPool * redis_connection_pool_new (const char *host, int port, int max_connections) { RedisConnectionPool *pool = g_new0 (RedisConnectionPool, 1); pool->host = g_strdup(host); pool->port = port; pool->connections = g_ptr_array_sized_new (max_connections); pool->max_connections = max_connections; pthread_mutex_init (&pool->lock, NULL); return pool; } static RedisConnection * redis_connection_pool_get_connection (RedisConnectionPool *pool, const char *passwd) { RedisConnection *conn = NULL; if (pool->max_connections == 0) { conn = redis_connection_new (pool->host, 
passwd, pool->port);
        return conn;
    }

    /* Pooled mode: look for an idle connection under the pool lock. */
    pthread_mutex_lock (&pool->lock);

    guint i, size = pool->connections->len;
    for (i = 0; i < size; ++i) {
        conn = g_ptr_array_index (pool->connections, i);
        if (!conn->is_available) {
            continue;
        }
        conn->is_available = FALSE;
        goto out;
    }

    /* No idle connection; create a new one if the pool isn't full. */
    conn = NULL;
    if (size < pool->max_connections) {
        conn = redis_connection_new (pool->host, passwd, pool->port);
        if (conn) {
            conn->is_available = FALSE;
            g_ptr_array_add (pool->connections, conn);
        }
    } else {
        seaf_warning ("The number of redis connections exceeds the limit. The maximum connections is %d.\n", pool->max_connections);
    }

out:
    pthread_mutex_unlock (&pool->lock);
    return conn;
}

/* Return a connection to the pool.
 * Connections flagged with "release" (set after a protocol error) are
 * removed from the pool and freed; otherwise the connection is simply
 * marked available again. In no-pool mode (max_connections == 0) the
 * connection is always freed. */
static void
redis_connection_pool_return_connection (RedisConnectionPool *pool,
                                         RedisConnection *conn)
{
    if (!conn)
        return;

    if (pool->max_connections == 0) {
        redis_connection_free (conn);
        return;
    }

    if (conn->release) {
        pthread_mutex_lock (&pool->lock);
        g_ptr_array_remove (pool->connections, conn);
        pthread_mutex_unlock (&pool->lock);
        redis_connection_free (conn);
        return;
    }

    pthread_mutex_lock (&pool->lock);
    conn->is_available = TRUE;
    pthread_mutex_unlock (&pool->lock);
}

/* Fetch the cached value for obj_id with a redis GET.
 * On success a newly allocated copy of the value is returned and its
 * size stored in *len; NULL is returned on a miss or error. A
 * connection that hit an error is flagged for release. */
void *
redis_cache_get_object (ObjCache *cache, const char *obj_id, size_t *len)
{
    RedisConnection *conn;
    char *object = NULL;
    redisReply *reply;
    RedisPriv *priv = cache->priv;
    RedisConnectionPool *pool = priv->redis_pool;

    conn = redis_connection_pool_get_connection (pool, priv->passwd);
    if (!conn) {
        seaf_warning ("Failed to get redis connection to host %s.\n", cache->host);
        return NULL;
    }

    reply = redisCommand(conn->ac, "GET %s", obj_id);
    if (!reply) {
        seaf_warning ("Failed to get object %s from redis cache.\n", obj_id);
        conn->release = TRUE;
        goto out;
    }
    if (reply->type != REDIS_REPLY_STRING) {
        /* A non-string, non-error reply (e.g. nil) is a plain cache
         * miss; only REDIS_REPLY_ERROR marks the connection bad. */
        if (reply->type == REDIS_REPLY_ERROR) {
            conn->release = TRUE;
            seaf_warning ("Failed to get %s from redis cache: %s.\n", obj_id, reply->str);
        }
        goto out;
    }

    *len = reply->len;
    object = g_memdup (reply->str, reply->len);

out:
    freeReplyObject(reply);
redis_connection_pool_return_connection (pool, conn); return object; } int redis_cache_set_object (ObjCache *cache, const char *obj_id, const void *object, int len, int expiry) { RedisConnection *conn; redisReply *reply; int ret = 0; RedisPriv *priv = cache->priv; RedisConnectionPool *pool = priv->redis_pool; conn = redis_connection_pool_get_connection (pool, priv->passwd); if (!conn) { seaf_warning ("Failed to get redis connection to host %s.\n", cache->host); return -1; } if (expiry <= 0) expiry = cache->mc_expiry; reply = redisCommand(conn->ac, "SET %s %b EX %d", obj_id, object, len, expiry); if (!reply) { seaf_warning ("Failed to set object %s to redis cache.\n", obj_id); ret = -1; conn->release = TRUE; goto out; } if (reply->type != REDIS_REPLY_STATUS || g_strcmp0 (reply->str, "OK") != 0) { if (reply->type == REDIS_REPLY_ERROR) { conn->release = TRUE; seaf_warning ("Failed to set %s to redis: %s.\n", obj_id, reply->str); } ret = -1; } out: freeReplyObject(reply); redis_connection_pool_return_connection (pool, conn); return ret; } gboolean redis_cache_test_object (ObjCache *cache, const char *obj_id) { RedisConnection *conn; redisReply *reply; gboolean ret = FALSE; RedisPriv *priv = cache->priv; RedisConnectionPool *pool = priv->redis_pool; conn = redis_connection_pool_get_connection (pool, priv->passwd); if (!conn) { seaf_warning ("Failed to get redis connection to host %s.\n", cache->host); return ret; } reply = redisCommand(conn->ac, "EXISTS %s", obj_id); if (!reply) { seaf_warning ("Failed to test object %s from redis cache.\n", obj_id); conn->release = TRUE; goto out; } if (reply->type != REDIS_REPLY_INTEGER || reply->integer != 1) { if (reply->type == REDIS_REPLY_ERROR) { conn->release = TRUE; seaf_warning ("Failed to test %s from redis: %s.\n", obj_id, reply->str); } goto out; } ret = TRUE; out: freeReplyObject(reply); redis_connection_pool_return_connection (pool, conn); return ret; } int redis_cache_delete_object (ObjCache *cache, const char *obj_id) { 
RedisConnection *conn; redisReply *reply; int ret = 0; RedisPriv *priv = cache->priv; RedisConnectionPool *pool = priv->redis_pool; conn = redis_connection_pool_get_connection (pool, priv->passwd); if (!conn) { seaf_warning ("Failed to get redis connection to host %s.\n", cache->host); return -1; } reply = redisCommand(conn->ac, "DEL %s", obj_id); if (!reply) { seaf_warning ("Failed to delete object %s from redis cache.\n", obj_id); ret = -1; conn->release = TRUE; goto out; } if (reply->type != REDIS_REPLY_INTEGER || reply->integer != 1) { if (reply->type == REDIS_REPLY_ERROR) { conn->release = TRUE; seaf_warning ("Failed to del %s from redis: %s.\n", obj_id, reply->str); } ret = -1; } out: freeReplyObject(reply); redis_connection_pool_return_connection (pool, conn); return ret; } int redis_cache_publish (ObjCache *cache, const char *channel, const char *msg) { RedisConnection *conn; redisReply *reply; int ret = 0; RedisPriv *priv = cache->priv; RedisConnectionPool *pool = priv->redis_pool; conn = redis_connection_pool_get_connection (pool, priv->passwd); if (!conn) { seaf_warning ("Failed to get redis connection to host %s.\n", cache->host); return -1; } reply = redisCommand(conn->ac, "PUBLISH %s %s", channel, msg); if (!reply) { seaf_warning ("Failed to publish message to redis channel %s.\n", channel); ret = -1; conn->release = TRUE; goto out; } if (reply->type != REDIS_REPLY_INTEGER || reply->integer < 0) { if (reply->type == REDIS_REPLY_ERROR) { conn->release = TRUE; seaf_warning ("Failed to publish message to redis channel %s.\n", channel); } ret = -1; } out: freeReplyObject(reply); redis_connection_pool_return_connection (pool, conn); return ret; } int redis_cache_push (ObjCache *cache, const char *list, const char *msg) { RedisConnection *conn; redisReply *reply; int ret = 0; RedisPriv *priv = cache->priv; RedisConnectionPool *pool = priv->redis_pool; conn = redis_connection_pool_get_connection (pool, priv->passwd); if (!conn) { seaf_warning ("Failed to get 
redis connection to host %s.\n", cache->host); return -1; } reply = redisCommand(conn->ac, "LPUSH %s %s", list, msg); if (!reply) { seaf_warning ("Failed to push message to redis list %s.\n", list); ret = -1; conn->release = TRUE; goto out; } if (reply->type != REDIS_REPLY_INTEGER || reply->integer < 0) { if (reply->type == REDIS_REPLY_ERROR) { conn->release = TRUE; seaf_warning ("Failed to push message to redis list %s.\n", list); } ret = -1; } out: freeReplyObject(reply); redis_connection_pool_return_connection (pool, conn); return ret; } ObjCache * redis_cache_new (const char *host, const char *passwd, int port, int redis_expiry, int max_connections) { ObjCache *cache = g_new0 (ObjCache, 1); RedisPriv *priv = g_new0 (RedisPriv, 1); priv->redis_pool = redis_connection_pool_new (host, port, max_connections); cache->priv = priv; cache->host = g_strdup (host); priv->passwd = g_strdup (passwd); cache->port = port; cache->mc_expiry = redis_expiry; cache->cache_type = TYPE_REDIS; cache->get_object = redis_cache_get_object; cache->set_object = redis_cache_set_object; cache->test_object = redis_cache_test_object; cache->delete_object = redis_cache_delete_object; cache->publish = redis_cache_publish; cache->push = redis_cache_push; return cache; } ================================================ FILE: common/redis-cache.h ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef REDIS_CACHE_H #define REDIS_CACHE_H #include "obj-cache.h" ObjCache * redis_cache_new (const char *host, const char *passwd, int port, int mc_expiry, int max_connections); #endif ================================================ FILE: common/rpc-service.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include "common.h" #include #include #include #include #include "utils.h" #include "seafile-session.h" #include "seaf-utils.h" #include 
"fs-mgr.h" #include "repo-mgr.h" #include "seafile-error.h" #include "seafile-rpc.h" #include "mq-mgr.h" #include "password-hash.h" #ifdef SEAFILE_SERVER #include "web-accesstoken-mgr.h" #endif #ifndef SEAFILE_SERVER #include "seafile-config.h" #endif #define DEBUG_FLAG SEAFILE_DEBUG_OTHER #include "log.h" #ifndef SEAFILE_SERVER #include "../daemon/vc-utils.h" #endif /* SEAFILE_SERVER */ /* -------- Utilities -------- */ static GObject* convert_repo (SeafRepo *r) { SeafileRepo *repo = NULL; #ifndef SEAFILE_SERVER if (r->head == NULL) return NULL; if (r->worktree_invalid && !seafile_session_config_get_allow_invalid_worktree(seaf)) return NULL; #endif repo = seafile_repo_new (); if (!repo) return NULL; g_object_set (repo, "id", r->id, "name", r->name, "desc", r->desc, "encrypted", r->encrypted, "magic", r->magic, "enc_version", r->enc_version, "pwd_hash", r->pwd_hash, "pwd_hash_algo", r->pwd_hash_algo, "pwd_hash_params", r->pwd_hash_params, "head_cmmt_id", r->head ? r->head->commit_id : NULL, "root", r->root_id, "version", r->version, "last_modify", r->last_modify, "last_modifier", r->last_modifier, NULL); g_object_set (repo, "repo_id", r->id, "repo_name", r->name, "repo_desc", r->desc, "last_modified", r->last_modify, "status", r->status, "repo_type", r->type, NULL); #ifdef SEAFILE_SERVER if (r->virtual_info) { g_object_set (repo, "is_virtual", TRUE, "origin_repo_id", r->virtual_info->origin_repo_id, "origin_path", r->virtual_info->path, NULL); } if (r->encrypted) { if (r->enc_version >= 2) g_object_set (repo, "random_key", r->random_key, NULL); if (r->enc_version >= 3) g_object_set (repo, "salt", r->salt, NULL); } g_object_set (repo, "store_id", r->store_id, "repaired", r->repaired, "size", r->size, "file_count", r->file_count, NULL); g_object_set (repo, "is_corrupted", r->is_corrupted, NULL); #endif #ifndef SEAFILE_SERVER g_object_set (repo, "worktree", r->worktree, "relay-id", r->relay_id, "worktree-invalid", r->worktree_invalid, "last-sync-time", 
r->last_sync_time, "auto-sync", r->auto_sync, NULL); #endif /* SEAFILE_SERVER */ return (GObject *)repo; } static void free_repo_obj (gpointer repo) { if (!repo) return; g_object_unref ((GObject *)repo); } static GList * convert_repo_list (GList *inner_repos) { GList *ret = NULL, *ptr; GObject *repo = NULL; for (ptr = inner_repos; ptr; ptr=ptr->next) { SeafRepo *r = ptr->data; repo = convert_repo (r); if (!repo) { g_list_free_full (ret, free_repo_obj); return NULL; } ret = g_list_prepend (ret, repo); } return g_list_reverse (ret); } /* * RPC functions available for both clients and server. */ GList * seafile_branch_gets (const char *repo_id, GError **error) { if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return NULL; } GList *blist = seaf_branch_manager_get_branch_list(seaf->branch_mgr, repo_id); GList *ptr; GList *ret = NULL; for (ptr = blist; ptr; ptr=ptr->next) { SeafBranch *b = ptr->data; SeafileBranch *branch = seafile_branch_new (); g_object_set (branch, "repo_id", b->repo_id, "name", b->name, "commit_id", b->commit_id, NULL); ret = g_list_prepend (ret, branch); seaf_branch_unref (b); } ret = g_list_reverse (ret); g_list_free (blist); return ret; } #ifdef SEAFILE_SERVER GList* seafile_get_trash_repo_list (int start, int limit, GError **error) { return seaf_repo_manager_get_trash_repo_list (seaf->repo_mgr, start, limit, error); } GList * seafile_get_trash_repos_by_owner (const char *owner, GError **error) { if (!owner) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return NULL; } return seaf_repo_manager_get_trash_repos_by_owner (seaf->repo_mgr, owner, error); } int seafile_del_repo_from_trash (const char *repo_id, GError **error) { int ret = 0; if (!repo_id) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo 
id"); return -1; } ret = seaf_repo_manager_del_repo_from_trash (seaf->repo_mgr, repo_id, error); return ret; } int seafile_empty_repo_trash (GError **error) { return seaf_repo_manager_empty_repo_trash (seaf->repo_mgr, error); } int seafile_empty_repo_trash_by_owner (const char *owner, GError **error) { if (!owner) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } return seaf_repo_manager_empty_repo_trash_by_owner (seaf->repo_mgr, owner, error); } int seafile_restore_repo_from_trash (const char *repo_id, GError **error) { int ret = 0; if (!repo_id) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } ret = seaf_repo_manager_restore_repo_from_trash (seaf->repo_mgr, repo_id, error); return ret; } int seafile_publish_event(const char *channel, const char *content, GError **error) { int ret = 0; if (!channel || !content) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } ret = seaf_mq_manager_publish_event (seaf->mq_mgr, channel, content); return ret; } json_t * seafile_pop_event(const char *channel, GError **error) { if (!channel) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return NULL; } return seaf_mq_manager_pop_event (seaf->mq_mgr, channel); } #endif GList* seafile_get_repo_list (int start, int limit, const char *order_by, int ret_virt_repo, GError **error) { GList *repos = seaf_repo_manager_get_repo_list(seaf->repo_mgr, start, limit, order_by, ret_virt_repo); GList *ret = NULL; ret = convert_repo_list (repos); #ifdef SEAFILE_SERVER GList *ptr; for (ptr = repos; ptr != NULL; ptr = ptr->next) seaf_repo_unref ((SeafRepo *)ptr->data); #endif g_list_free (repos); return ret; } #ifdef SEAFILE_SERVER gint64 seafile_count_repos (GError **error) { return 
seaf_repo_manager_count_repos (seaf->repo_mgr, error); } #endif GObject* seafile_get_repo (const char *repo_id, GError **error) { SeafRepo *r; if (!repo_id) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return NULL; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return NULL; } r = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); /* Don't return repo that's not checked out. */ if (r == NULL) return NULL; GObject *repo = convert_repo (r); #ifdef SEAFILE_SERVER seaf_repo_unref (r); #endif return repo; } SeafileCommit * convert_to_seafile_commit (SeafCommit *c) { SeafileCommit *commit = seafile_commit_new (); g_object_set (commit, "id", c->commit_id, "creator_name", c->creator_name, "creator", c->creator_id, "desc", c->desc, "ctime", c->ctime, "repo_id", c->repo_id, "root_id", c->root_id, "parent_id", c->parent_id, "second_parent_id", c->second_parent_id, "version", c->version, "new_merge", c->new_merge, "conflict", c->conflict, "device_name", c->device_name, "client_version", c->client_version, NULL); return commit; } GObject* seafile_get_commit (const char *repo_id, int version, const gchar *id, GError **error) { SeafileCommit *commit; SeafCommit *c; if (!repo_id || !is_uuid_valid(repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return NULL; } if (!id || !is_object_id_valid(id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid commit id"); return NULL; } c = seaf_commit_manager_get_commit (seaf->commit_mgr, repo_id, version, id); if (!c) return NULL; commit = convert_to_seafile_commit (c); seaf_commit_unref (c); return (GObject *)commit; } struct CollectParam { int offset; int limit; int count; GList *commits; #ifdef SEAFILE_SERVER gint64 truncate_time; gboolean traversed_head; #endif }; static gboolean get_commit (SeafCommit *c, void *data, gboolean *stop) { struct CollectParam *cp = data; #ifdef 
SEAFILE_SERVER if (cp->truncate_time == 0) { *stop = TRUE; /* Stop after traversing the head commit. */ } /* We use <= here. This is for handling clean trash and history. * If the user cleans all history, truncate time will be equal to * the commit's ctime. In such case, we don't actually want to display * this commit. */ else if (cp->truncate_time > 0 && (gint64)(c->ctime) <= cp->truncate_time && cp->traversed_head) { /* Still traverse the first commit older than truncate_time. * If a file in the child commit of this commit is deleted, * we need to access this commit in order to restore it * from trash. */ *stop = TRUE; } /* Always traverse the head commit. */ if (!cp->traversed_head) cp->traversed_head = TRUE; #endif /* if offset = 1, limit = 1, we should stop when the count = 2 */ if (cp->limit > 0 && cp->count >= cp->offset + cp->limit) { *stop = TRUE; return TRUE; /* TRUE to indicate no error */ } if (cp->count >= cp->offset) { SeafileCommit *commit = convert_to_seafile_commit (c); cp->commits = g_list_prepend (cp->commits, commit); } ++cp->count; return TRUE; /* TRUE to indicate no error */ } GList* seafile_get_commit_list (const char *repo_id, int offset, int limit, GError **error) { SeafRepo *repo; GList *commits = NULL; gboolean ret; struct CollectParam cp; char *commit_id; if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return NULL; } /* correct parameter */ if (offset < 0) offset = 0; if (!repo_id) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return NULL; } repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_REPO, "No such repository"); return NULL; } if (!repo->head) { SeafBranch *branch = seaf_branch_manager_get_branch (seaf->branch_mgr, repo->id, "master"); if (branch != NULL) { commit_id = g_strdup (branch->commit_id); seaf_branch_unref (branch); } else { seaf_warning 
("[repo-mgr] Failed to get repo %s branch master\n", repo_id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_REPO, "No head and branch master"); #ifdef SEAFILE_SERVER seaf_repo_unref (repo); #endif return NULL; } } else { commit_id = g_strdup (repo->head->commit_id); } /* Init CollectParam */ memset (&cp, 0, sizeof(cp)); cp.offset = offset; cp.limit = limit; #ifdef SEAFILE_SERVER cp.truncate_time = seaf_repo_manager_get_repo_truncate_time (seaf->repo_mgr, repo_id); #endif ret = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr, repo->id, repo->version, commit_id, get_commit, &cp, TRUE); g_free (commit_id); #ifdef SEAFILE_SERVER seaf_repo_unref (repo); #endif if (!ret) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_LIST_COMMITS, "Failed to list commits"); return NULL; } commits = g_list_reverse (cp.commits); return commits; } #ifndef SEAFILE_SERVER static int do_unsync_repo(SeafRepo *repo) { if (!seaf->started) { seaf_message ("System not started, skip removing repo.\n"); return -1; } if (repo->auto_sync && (repo->sync_interval == 0)) seaf_wt_monitor_unwatch_repo (seaf->wt_monitor, repo->id); seaf_sync_manager_cancel_sync_task (seaf->sync_mgr, repo->id); SyncInfo *info = seaf_sync_manager_get_sync_info (seaf->sync_mgr, repo->id); /* If we are syncing the repo, * we just mark the repo as deleted and let sync-mgr actually delete it. * Otherwise we are safe to delete the repo. 
*/ char *worktree = g_strdup (repo->worktree); if (info != NULL && info->in_sync) { seaf_repo_manager_mark_repo_deleted (seaf->repo_mgr, repo); } else { seaf_repo_manager_del_repo (seaf->repo_mgr, repo); } g_free (worktree); return 0; } static void cancel_clone_tasks_by_account (const char *account_server, const char *account_email) { GList *ptr, *tasks; CloneTask *task; tasks = seaf_clone_manager_get_tasks (seaf->clone_mgr); for (ptr = tasks; ptr != NULL; ptr = ptr->next) { task = ptr->data; if (g_strcmp0(account_server, task->peer_addr) == 0 && g_strcmp0(account_email, task->email) == 0) { seaf_clone_manager_cancel_task (seaf->clone_mgr, task->repo_id); } } g_list_free (tasks); } int seafile_unsync_repos_by_account (const char *server_addr, const char *email, GError **error) { if (!server_addr || !email) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } GList *ptr, *repos = seaf_repo_manager_get_repo_list(seaf->repo_mgr, -1, -1, NULL, 0); if (!repos) { return 0; } for (ptr = repos; ptr; ptr = ptr->next) { SeafRepo *repo = (SeafRepo*)ptr->data; char *addr = NULL; seaf_repo_manager_get_repo_relay_info(seaf->repo_mgr, repo->id, &addr, /* addr */ NULL); /* port */ if (g_strcmp0(addr, server_addr) == 0 && g_strcmp0(repo->email, email) == 0) { if (do_unsync_repo(repo) < 0) { return -1; } } g_free (addr); } g_list_free (repos); cancel_clone_tasks_by_account (server_addr, email); return 0; } int seafile_remove_repo_tokens_by_account (const char *server_addr, const char *email, GError **error) { if (!server_addr || !email) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } GList *ptr, *repos = seaf_repo_manager_get_repo_list(seaf->repo_mgr, -1, -1, NULL, 0); if (!repos) { return 0; } for (ptr = repos; ptr; ptr = ptr->next) { SeafRepo *repo = (SeafRepo*)ptr->data; char *addr = NULL; seaf_repo_manager_get_repo_relay_info(seaf->repo_mgr, repo->id, &addr, /* addr */ NULL); 
/* port */ if (g_strcmp0(addr, server_addr) == 0 && g_strcmp0(repo->email, email) == 0) { if (seaf_repo_manager_remove_repo_token(seaf->repo_mgr, repo) < 0) { return -1; } } g_free (addr); } g_list_free (repos); cancel_clone_tasks_by_account (server_addr, email); return 0; } int seafile_set_repo_token (const char *repo_id, const char *token, GError **error) { int ret; if (repo_id == NULL || token == NULL) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty"); return -1; } SeafRepo *repo; repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_REPO, "Can't find Repo %s", repo_id); return -1; } ret = seaf_repo_manager_set_repo_token (seaf->repo_mgr, repo, token); if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, "Failed to set token for repo %s", repo_id); return -1; } return 0; } #endif int seafile_destroy_repo (const char *repo_id, GError **error) { if (!repo_id) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } #ifndef SEAFILE_SERVER SeafRepo *repo; repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "No such repository"); return -1; } return do_unsync_repo(repo); #else return seaf_repo_manager_del_repo (seaf->repo_mgr, repo_id, error); #endif } GObject * seafile_generate_magic_and_random_key(int enc_version, const char* repo_id, const char *passwd, GError **error) { if (!repo_id || !passwd) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return NULL; } gchar salt[65] = {0}; gchar magic[65] = {0}; gchar pwd_hash[65] = {0}; gchar random_key[97] = {0}; if (enc_version >= 3 && seafile_generate_repo_salt (salt) < 0) { return NULL; } seafile_generate_magic 
(enc_version, repo_id, passwd, salt, magic); if (seafile_generate_random_key (passwd, enc_version, salt, random_key) < 0) { return NULL; } SeafileEncryptionInfo *sinfo; sinfo = g_object_new (SEAFILE_TYPE_ENCRYPTION_INFO, "repo_id", repo_id, "passwd", passwd, "enc_version", enc_version, "magic", magic, "random_key", random_key, NULL); if (enc_version >= 3) g_object_set (sinfo, "salt", salt, NULL); return (GObject *)sinfo; } #include "diff-simple.h" inline static const char* get_diff_status_str(char status) { if (status == DIFF_STATUS_ADDED) return "add"; if (status == DIFF_STATUS_DELETED) return "del"; if (status == DIFF_STATUS_MODIFIED) return "mod"; if (status == DIFF_STATUS_RENAMED) return "mov"; if (status == DIFF_STATUS_DIR_ADDED) return "newdir"; if (status == DIFF_STATUS_DIR_DELETED) return "deldir"; return NULL; } GList * seafile_diff (const char *repo_id, const char *arg1, const char *arg2, int fold_dir_results, GError **error) { SeafRepo *repo; char *err_msgs = NULL; GList *diff_entries, *p; GList *ret = NULL; if (!repo_id || !arg1 || !arg2) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return NULL; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return NULL; } if ((arg1[0] != 0 && !is_object_id_valid (arg1)) || !is_object_id_valid(arg2)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid commit id"); return NULL; } repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "No such repository"); return NULL; } diff_entries = seaf_repo_diff (repo, arg1, arg2, fold_dir_results, &err_msgs); if (err_msgs) { g_set_error (error, SEAFILE_DOMAIN, -1, "%s", err_msgs); g_free (err_msgs); #ifdef SEAFILE_SERVER seaf_repo_unref (repo); #endif return NULL; } #ifdef SEAFILE_SERVER seaf_repo_unref (repo); #endif for (p = diff_entries; p != NULL; p = p->next) { DiffEntry *de = p->data; 
SeafileDiffEntry *entry = g_object_new ( SEAFILE_TYPE_DIFF_ENTRY, "status", get_diff_status_str(de->status), "name", de->name, "new_name", de->new_name, NULL); ret = g_list_prepend (ret, entry); } for (p = diff_entries; p != NULL; p = p->next) { DiffEntry *de = p->data; diff_entry_free (de); } g_list_free (diff_entries); return g_list_reverse (ret); } /* * RPC functions only available for server. */ #ifdef SEAFILE_SERVER GList * seafile_list_dir_by_path(const char *repo_id, const char *commit_id, const char *path, GError **error) { SeafRepo *repo = NULL; SeafCommit *commit = NULL; SeafDir *dir; SeafDirent *dent; SeafileDirent *d; GList *ptr; GList *res = NULL; if (!repo_id || !commit_id || !path) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Args can't be NULL"); return NULL; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return NULL; } if (!is_object_id_valid (commit_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid commit id"); return NULL; } repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad repo id"); return NULL; } commit = seaf_commit_manager_get_commit (seaf->commit_mgr, repo_id, repo->version, commit_id); if (!commit) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_COMMIT, "No such commit"); goto out; } char *rpath = format_dir_path (path); dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr, repo->store_id, repo->version, commit->root_id, rpath, error); g_free (rpath); if (!dir) { seaf_warning ("Can't find seaf dir for %s in repo %s\n", path, repo->store_id); goto out; } for (ptr = dir->entries; ptr != NULL; ptr = ptr->next) { dent = ptr->data; if (!is_object_id_valid (dent->id)) continue; d = g_object_new (SEAFILE_TYPE_DIRENT, "obj_id", dent->id, "obj_name", dent->name, "mode", dent->mode, "version", dent->version, "mtime", dent->mtime, "size", dent->size, NULL); res = 
g_list_prepend (res, d); } seaf_dir_free (dir); res = g_list_reverse (res); out: seaf_repo_unref (repo); seaf_commit_unref (commit); return res; } static void filter_error (GError **error) { if (*error && g_error_matches(*error, SEAFILE_DOMAIN, SEAF_ERR_PATH_NO_EXIST)) { g_clear_error (error); } } char * seafile_get_dir_id_by_commit_and_path(const char *repo_id, const char *commit_id, const char *path, GError **error) { SeafRepo *repo = NULL; char *res = NULL; SeafCommit *commit = NULL; SeafDir *dir; if (!repo_id || !commit_id || !path) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Args can't be NULL"); return NULL; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return NULL; } if (!is_object_id_valid (commit_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid commit id"); return NULL; } repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad repo id"); return NULL; } commit = seaf_commit_manager_get_commit (seaf->commit_mgr, repo_id, repo->version, commit_id); if (!commit) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_COMMIT, "No such commit"); goto out; } char *rpath = format_dir_path (path); dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr, repo->store_id, repo->version, commit->root_id, rpath, error); g_free (rpath); if (!dir) { seaf_warning ("Can't find seaf dir for %s in repo %s\n", path, repo->store_id); filter_error (error); goto out; } res = g_strdup (dir->dir_id); seaf_dir_free (dir); out: seaf_repo_unref (repo); seaf_commit_unref (commit); return res; } int seafile_edit_repo (const char *repo_id, const char *name, const char *description, const char *user, GError **error) { return seaf_repo_manager_edit_repo (repo_id, name, description, user, error); } int seafile_change_repo_passwd (const char *repo_id, const char *old_passwd, const char *new_passwd, const char *user, GError 
**error) { SeafRepo *repo = NULL; SeafCommit *commit = NULL, *parent = NULL; int ret = 0; if (!user) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "No user given"); return -1; } if (!old_passwd || old_passwd[0] == 0 || !new_passwd || new_passwd[0] == 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Empty passwd"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } retry: repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "No such library"); return -1; } if (!repo->encrypted) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Repo not encrypted"); return -1; } if (repo->enc_version < 2) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Unsupported enc version"); return -1; } if (repo->pwd_hash_algo) { if (seafile_pwd_hash_verify_repo_passwd (repo->enc_version, repo_id, old_passwd, repo->salt, repo->pwd_hash, repo->pwd_hash_algo, repo->pwd_hash_params) < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Incorrect password"); return -1; } } else { if (seafile_verify_repo_passwd (repo_id, old_passwd, repo->magic, repo->enc_version, repo->salt) < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Incorrect password"); return -1; } } parent = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id, repo->version, repo->head->commit_id); if (!parent) { seaf_warning ("Failed to get commit %s:%s.\n", repo->id, repo->head->commit_id); ret = -1; goto out; } char new_magic[65], new_pwd_hash[65], new_random_key[97]; if (repo->pwd_hash_algo) { seafile_generate_pwd_hash (repo->enc_version, repo_id, new_passwd, repo->salt, repo->pwd_hash_algo, repo->pwd_hash_params, new_pwd_hash); } else { seafile_generate_magic (repo->enc_version, repo_id, new_passwd, repo->salt, new_magic); } if (seafile_update_random_key (old_passwd, repo->random_key, new_passwd, 
new_random_key, repo->enc_version, repo->salt) < 0) { ret = -1; goto out; } if (repo->pwd_hash_algo) { memcpy (repo->pwd_hash, new_pwd_hash, 64); } else { memcpy (repo->magic, new_magic, 64); } memcpy (repo->random_key, new_random_key, 96); commit = seaf_commit_new (NULL, repo->id, parent->root_id, user, EMPTY_SHA1, "Changed library password", 0); commit->parent_id = g_strdup(parent->commit_id); seaf_repo_to_commit (repo, commit); if (seaf_commit_manager_add_commit (seaf->commit_mgr, commit) < 0) { ret = -1; goto out; } seaf_branch_set_commit (repo->head, commit->commit_id); if (seaf_branch_manager_test_and_update_branch (seaf->branch_mgr, repo->head, parent->commit_id, FALSE, NULL, NULL, NULL) < 0) { seaf_repo_unref (repo); seaf_commit_unref (commit); seaf_commit_unref (parent); repo = NULL; commit = NULL; parent = NULL; goto retry; } if (seaf_passwd_manager_is_passwd_set (seaf->passwd_mgr, repo_id, user)) seaf_passwd_manager_set_passwd (seaf->passwd_mgr, repo_id, user, new_passwd, error); out: seaf_commit_unref (commit); seaf_commit_unref (parent); seaf_repo_unref (repo); return ret; } static void set_pwd_hash_to_commit (SeafCommit *commit, SeafRepo *repo, const char *pwd_hash, const char *pwd_hash_algo, const char *pwd_hash_params) { commit->repo_name = g_strdup (repo->name); commit->repo_desc = g_strdup (repo->desc); commit->encrypted = repo->encrypted; commit->repaired = repo->repaired; if (commit->encrypted) { commit->enc_version = repo->enc_version; if (commit->enc_version == 2) { commit->random_key = g_strdup (repo->random_key); } else if (commit->enc_version == 3) { commit->random_key = g_strdup (repo->random_key); commit->salt = g_strdup (repo->salt); } else if (commit->enc_version == 4) { commit->random_key = g_strdup (repo->random_key); commit->salt = g_strdup (repo->salt); } commit->pwd_hash = g_strdup (pwd_hash); commit->pwd_hash_algo = g_strdup (pwd_hash_algo); commit->pwd_hash_params = g_strdup (pwd_hash_params); } commit->no_local_history = 
repo->no_local_history; commit->version = repo->version; } int seafile_upgrade_repo_pwd_hash_algorithm (const char *repo_id, const char *user, const char *passwd, const char *pwd_hash_algo, const char *pwd_hash_params, GError **error) { SeafRepo *repo = NULL; SeafCommit *commit = NULL, *parent = NULL; int ret = 0; if (!user) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "No user given"); return -1; } if (!passwd || passwd[0] == 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Empty passwd"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } if (!pwd_hash_algo) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid pwd hash algorithm"); return -1; } if (g_strcmp0 (pwd_hash_algo, PWD_HASH_PDKDF2) != 0 && g_strcmp0 (pwd_hash_algo, PWD_HASH_ARGON2ID) != 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Unsupported pwd hash algorithm"); return -1; } if (!pwd_hash_params) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid pwd hash params"); return -1; } retry: repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "No such library"); return -1; } if (g_strcmp0 (pwd_hash_algo, repo->pwd_hash_algo) == 0 && g_strcmp0 (pwd_hash_params, repo->pwd_hash_params) == 0) { goto out; } if (!repo->encrypted) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Repo not encrypted"); ret = -1; goto out; } if (repo->pwd_hash_algo) { if (seafile_pwd_hash_verify_repo_passwd (repo->enc_version, repo_id, passwd, repo->salt, repo->pwd_hash, repo->pwd_hash_algo, repo->pwd_hash_params) < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Incorrect password"); ret = 1; goto out; } } else { if (seafile_verify_repo_passwd (repo_id, passwd, repo->magic, repo->enc_version, repo->salt) < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Incorrect password"); 
ret = -1; goto out; } } parent = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id, repo->version, repo->head->commit_id); if (!parent) { seaf_warning ("Failed to get commit %s:%s.\n", repo->id, repo->head->commit_id); ret = -1; goto out; } char new_pwd_hash[65]= {0}; seafile_generate_pwd_hash (repo->enc_version, repo_id, passwd, repo->salt, pwd_hash_algo, pwd_hash_params, new_pwd_hash); // To prevent clients that have already synced this repo from overwriting the modified encryption algorithm, // delete all sync tokens. if (seaf_delete_repo_tokens (repo) < 0) { seaf_warning ("Failed to delete repo sync tokens, abort change pwd hash algorithm.\n"); ret = -1; goto out; } memcpy (repo->pwd_hash, new_pwd_hash, 64); commit = seaf_commit_new (NULL, repo->id, parent->root_id, user, EMPTY_SHA1, "Changed library password hash algorithm", 0); commit->parent_id = g_strdup(parent->commit_id); set_pwd_hash_to_commit (commit, repo, new_pwd_hash, pwd_hash_algo, pwd_hash_params); if (seaf_commit_manager_add_commit (seaf->commit_mgr, commit) < 0) { ret = -1; goto out; } seaf_branch_set_commit (repo->head, commit->commit_id); if (seaf_branch_manager_test_and_update_branch (seaf->branch_mgr, repo->head, parent->commit_id, FALSE, NULL, NULL, NULL) < 0) { seaf_repo_unref (repo); seaf_commit_unref (commit); seaf_commit_unref (parent); repo = NULL; commit = NULL; parent = NULL; goto retry; } if (seaf_passwd_manager_is_passwd_set (seaf->passwd_mgr, repo_id, user)) seaf_passwd_manager_set_passwd (seaf->passwd_mgr, repo_id, user, passwd, error); out: seaf_commit_unref (commit); seaf_commit_unref (parent); seaf_repo_unref (repo); return ret; } int seafile_is_repo_owner (const char *email, const char *repo_id, GError **error) { if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return 0; } char *owner = seaf_repo_manager_get_repo_owner (seaf->repo_mgr, repo_id); if (!owner) { /* seaf_warning ("Failed to get owner info for repo 
%s.\n", repo_id); */ return 0; } if (strcmp(owner, email) != 0) { g_free (owner); return 0; } g_free (owner); return 1; }

/* RPC: record `email` as the owner of `repo_id`. Returns 0 on success, -1 on bad args or manager failure. */
int seafile_set_repo_owner(const char *repo_id, const char *email, GError **error) { if (!repo_id || !email) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } return seaf_repo_manager_set_repo_owner(seaf->repo_mgr, repo_id, email); }

/* RPC: return the owner of `repo_id` as a newly allocated string (caller frees), or NULL. */
char * seafile_get_repo_owner (const char *repo_id, GError **error) { if (!repo_id) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return NULL; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return NULL; } char *owner = seaf_repo_manager_get_repo_owner (seaf->repo_mgr, repo_id); /* if (!owner){ */ /* seaf_warning ("Failed to get repo owner for repo %s.\n", repo_id); */ /* } */ return owner; }

/* RPC: list repos without an owner. The SeafRepo refs are dropped after conversion; caller owns the returned object list. */
GList * seafile_get_orphan_repo_list(GError **error) { GList *ret = NULL; GList *repos, *ptr; repos = seaf_repo_manager_get_orphan_repo_list(seaf->repo_mgr); ret = convert_repo_list (repos); for (ptr = repos; ptr; ptr = ptr->next) { seaf_repo_unref ((SeafRepo *)ptr->data); } g_list_free (repos); return ret; }

/* RPC: list repos owned by `email`, paginated by start/limit; `ret_corrupted` is forwarded to the repo manager. */
GList * seafile_list_owned_repos (const char *email, int ret_corrupted, int start, int limit, GError **error) { GList *ret = NULL; GList *repos, *ptr; repos = seaf_repo_manager_get_repos_by_owner (seaf->repo_mgr, email, ret_corrupted, start, limit, NULL); ret = convert_repo_list (repos); /* for (ptr = ret; ptr; ptr = ptr->next) { */ /* g_object_get (ptr->data, "repo_id", &repo_id, NULL); */ /* is_shared = seaf_share_manager_is_repo_shared (seaf->share_mgr, repo_id); */ /* if (is_shared < 0) { */ /* g_free (repo_id); */ /* break; */ /* } else { */ /* g_object_set (ptr->data, "is_shared", is_shared, NULL); */ /* g_free (repo_id); */ /* } */ /* } */ /* 
while (ptr) { */ /* g_object_set (ptr->data, "is_shared", FALSE, NULL); */ /* ptr = ptr->prev; */ /* } */ for(ptr = repos; ptr; ptr = ptr->next) { seaf_repo_unref ((SeafRepo *)ptr->data); } g_list_free (repos); return ret; }

/* RPC: search repos whose name matches `name`; result list is reversed before returning. */
GList * seafile_search_repos_by_name (const char *name, GError **error) { GList *ret = NULL; GList *repos, *ptr; repos = seaf_repo_manager_search_repos_by_name (seaf->repo_mgr, name); ret = convert_repo_list (repos); for (ptr = repos; ptr; ptr = ptr->next) { seaf_repo_unref ((SeafRepo *)ptr->data); } g_list_free (repos); return g_list_reverse(ret); }

/* RPC: total quota usage (bytes) for `email`; -1 on bad args or internal error. */
gint64 seafile_get_user_quota_usage (const char *email, GError **error) { gint64 ret; if (!email) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad user id"); return -1; } ret = seaf_quota_manager_get_user_usage (seaf->quota_mgr, email); if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Internal server error"); return -1; } return ret; }

/* RPC: share usage (bytes) for `email`; -1 on bad args or internal error. */
gint64 seafile_get_user_share_usage (const char *email, GError **error) { gint64 ret; if (!email) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad user id"); return -1; } ret = seaf_quota_manager_get_user_share_usage (seaf->quota_mgr, email); if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Internal server error"); return -1; } return ret; }

/* RPC: on-disk size (bytes) of a repo; -1 on bad args or internal error. */
gint64 seafile_server_repo_size(const char *repo_id, GError **error) { gint64 ret; if (!repo_id || strlen(repo_id) != 36) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad repo id"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } ret = seaf_repo_manager_get_repo_size (seaf->repo_mgr, repo_id); if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Internal server error"); return -1; } return ret; }

/* RPC: set the history-keeping limit (days) for a repo. */
int seafile_set_repo_history_limit (const char *repo_id, int days, GError **error) { if (!repo_id || !is_uuid_valid (repo_id)) { g_set_error (error, 
SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } if (seaf_repo_manager_set_repo_history_limit (seaf->repo_mgr, repo_id, days) < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, "DB Error"); return -1; } return 0; } int seafile_get_repo_history_limit (const char *repo_id, GError **error) { if (!repo_id || !is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } return seaf_repo_manager_get_repo_history_limit (seaf->repo_mgr, repo_id); } int seafile_set_repo_valid_since (const char *repo_id, gint64 timestamp, GError **error) { return seaf_repo_manager_set_repo_valid_since (seaf->repo_mgr, repo_id, timestamp); } int seafile_repo_set_access_property (const char *repo_id, const char *ap, GError **error) { int ret; if (!repo_id) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } if (strlen(repo_id) != 36) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Wrong repo id"); return -1; } if (g_strcmp0(ap, "public") != 0 && g_strcmp0(ap, "own") != 0 && g_strcmp0(ap, "private") != 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Wrong access property"); return -1; } ret = seaf_repo_manager_set_access_property (seaf->repo_mgr, repo_id, ap); if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Internal server error"); return -1; } return ret; } char * seafile_repo_query_access_property (const char *repo_id, GError **error) { char *ret; if (!repo_id) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return NULL; } if (strlen(repo_id) != 36) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Wrong repo id"); return NULL; } ret 
= seaf_repo_manager_query_access_property (seaf->repo_mgr, repo_id); return ret; } char * seafile_web_get_access_token (const char *repo_id, const char *obj_id, const char *op, const char *username, int use_onetime, GError **error) { char *token; if (!repo_id || !obj_id || !op || !username) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Missing args"); return NULL; } token = seaf_web_at_manager_get_access_token (seaf->web_at_mgr, repo_id, obj_id, op, username, use_onetime, error); return token; } GObject * seafile_web_query_access_token (const char *token, GError **error) { SeafileWebAccess *webaccess = NULL; if (!token) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Token should not be null"); return NULL; } webaccess = seaf_web_at_manager_query_access_token (seaf->web_at_mgr, token); if (webaccess) return (GObject *)webaccess; return NULL; } char * seafile_query_zip_progress (const char *token, GError **error) { #ifdef HAVE_EVHTP return zip_download_mgr_query_zip_progress (seaf->zip_download_mgr, token, error); #else return NULL; #endif } int seafile_cancel_zip_task (const char *token, GError **error) { #ifdef HAVE_EVHTP return zip_download_mgr_cancel_zip_task (seaf->zip_download_mgr, token); #else return 0; #endif } int seafile_add_share (const char *repo_id, const char *from_email, const char *to_email, const char *permission, GError **error) { int ret; if (!repo_id || !from_email || !to_email || !permission) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Missing args"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo_id parameter"); return -1; } if (g_strcmp0 (from_email, to_email) == 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Can not share repo to myself"); return -1; } if (!is_permission_valid (permission)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid permission parameter"); return -1; } ret = seaf_share_manager_add_share 
(seaf->share_mgr, repo_id, from_email, to_email, permission); return ret; }

/* RPC: list shares involving `email`; `type` selects direction ("from_email" or "to_email"). */
GList * seafile_list_share_repos (const char *email, const char *type, int start, int limit, GError **error) { if (g_strcmp0 (type, "from_email") != 0 && g_strcmp0 (type, "to_email") != 0 ) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Wrong type argument"); return NULL; } return seaf_share_manager_list_share_repos (seaf->share_mgr, email, type, start, limit, NULL); }

/* RPC: list the users that `from_user` has shared `repo_id` to. */
GList * seafile_list_repo_shared_to (const char *from_user, const char *repo_id, GError **error) { if (!from_user || !repo_id) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Missing args"); return NULL; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return NULL; } return seaf_share_manager_list_repo_shared_to (seaf->share_mgr, from_user, repo_id, error); }

/* RPC: share a sub-directory of `repo_id` to `share_user` by creating a
 * virtual repo rooted at `path` and sharing that. Returns the new virtual
 * repo id (caller frees) or NULL on failure. */
char * seafile_share_subdir_to_user (const char *repo_id, const char *path, const char *owner, const char *share_user, const char *permission, const char *passwd, GError **error) { if (is_empty_string (repo_id) || !is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo_id parameter"); return NULL; } if (is_empty_string (path) || strcmp (path, "/") == 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid path parameter"); return NULL; } if (is_empty_string (owner)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid owner parameter"); return NULL; } if (is_empty_string (share_user)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid share_user parameter"); return NULL; } if (strcmp (owner, share_user) == 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Can't share subdir to myself"); return NULL; } if (!is_permission_valid (permission)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid permission parameter"); return NULL; } char *real_path; char *vrepo_name; char *vrepo_id; char *ret = NULL; real_path = format_dir_path (path);
// Use subdir name as virtual repo name and description
vrepo_name = g_path_get_basename (real_path); vrepo_id = seaf_repo_manager_create_virtual_repo (seaf->repo_mgr, repo_id, real_path, vrepo_name, vrepo_name, owner, passwd, error); if (!vrepo_id) goto out; int result = seaf_share_manager_add_share (seaf->share_mgr, vrepo_id, owner, share_user, permission); if (result < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to share subdir to user"); g_free (vrepo_id); } else ret = vrepo_id; out: g_free (vrepo_name); g_free (real_path); return ret; }

/* RPC: remove a user share of the sub-directory `path` of `repo_id`. */
int seafile_unshare_subdir_for_user (const char *repo_id, const char *path, const char *owner, const char *share_user, GError **error) { if (is_empty_string (repo_id) || !is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo_id parameter"); return -1; } if (is_empty_string (path) || strcmp (path, "/") == 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid path parameter"); return -1; } if (is_empty_string (owner)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid owner parameter"); return -1; } if (is_empty_string (share_user) || strcmp (owner, share_user) == 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid share_user parameter"); return -1; } char *real_path; int ret = 0; real_path = format_dir_path (path); ret = seaf_share_manager_unshare_subdir (seaf->share_mgr, repo_id, real_path, owner, share_user); if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to unshare subdir for user"); } g_free (real_path); return ret; }

/* RPC: change the permission of an existing user share of a sub-directory. */
int seafile_update_share_subdir_perm_for_user (const char *repo_id, const char *path, const char *owner, const char *share_user, const char *permission, GError **error) { if (is_empty_string (repo_id) || !is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo_id parameter"); return -1; } if (is_empty_string (path) || strcmp (path, "/") == 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid path parameter"); return -1; } if (is_empty_string (owner)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid owner parameter"); return -1; } if (is_empty_string (share_user) || strcmp (owner, share_user) == 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid share_user parameter"); return -1; } if (!is_permission_valid (permission)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid permission parameter"); return -1; } char *real_path; int ret = 0; real_path = format_dir_path (path); ret = seaf_share_manager_set_subdir_perm_by_path (seaf->share_mgr, repo_id, owner, share_user, permission, real_path); if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to update share subdir permission for user"); } g_free (real_path); return ret; }

/* RPC: list the groups that `from_user` has shared `repo_id` to. */
GList * seafile_list_repo_shared_group (const char *from_user, const char *repo_id, GError **error) { if (!from_user || !repo_id) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Missing args"); return NULL; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return NULL; } return seaf_share_manager_list_repo_shared_group (seaf->share_mgr, from_user, repo_id, error); }

/* RPC: remove the share of `repo_id` from `from_email` to `to_email`. */
int seafile_remove_share (const char *repo_id, const char *from_email, const char *to_email, GError **error) { int ret; if (!repo_id || !from_email ||!to_email) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Missing args"); return -1; } ret = seaf_share_manager_remove_share (seaf->share_mgr, repo_id, from_email, to_email); return ret; } /* Group repo RPC. 
*/
/* RPC: share `repo_id` to group `group_id` on behalf of `user_name` with `permission`. */
int seafile_group_share_repo (const char *repo_id, int group_id, const char *user_name, const char *permission, GError **error) { SeafRepoManager *mgr = seaf->repo_mgr; int ret; if (group_id <= 0 || !user_name || !repo_id || !permission) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad input argument"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } if (!is_permission_valid (permission)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid permission parameter"); return -1; } ret = seaf_repo_manager_add_group_repo (mgr, repo_id, group_id, user_name, permission, error); return ret; }

/* RPC: remove the group share of `repo_id` from group `group_id`. */
int seafile_group_unshare_repo (const char *repo_id, int group_id, const char *user_name, GError **error) { SeafRepoManager *mgr = seaf->repo_mgr; int ret; if (!user_name || !repo_id) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "User name and repo id can not be NULL"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } ret = seaf_repo_manager_del_group_repo (mgr, repo_id, group_id, error); return ret; }

/* RPC: share a sub-directory of `repo_id` to group `share_group` by creating
 * a virtual repo rooted at `path` and group-sharing it. Returns the new
 * virtual repo id (caller frees) or NULL on failure. */
char * seafile_share_subdir_to_group (const char *repo_id, const char *path, const char *owner, int share_group, const char *permission, const char *passwd, GError **error) { if (is_empty_string (repo_id) || !is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo_id parameter"); return NULL; } if (is_empty_string (path) || strcmp (path, "/") == 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid path parameter"); return NULL; } if (is_empty_string (owner)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid owner parameter"); return NULL; } if (share_group < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid share_group parameter"); return NULL; } if (!is_permission_valid (permission)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid permission parameter"); return NULL; } char *real_path; char *vrepo_name; char *vrepo_id; char* ret = NULL; real_path = format_dir_path (path);
// Use subdir name as virtual repo name and description
vrepo_name = g_path_get_basename (real_path); vrepo_id = seaf_repo_manager_create_virtual_repo (seaf->repo_mgr, repo_id, real_path, vrepo_name, vrepo_name, owner, passwd, error); if (!vrepo_id) goto out; int result = seaf_repo_manager_add_group_repo (seaf->repo_mgr, vrepo_id, share_group, owner, permission, error); if (result < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to share subdir to group"); g_free (vrepo_id); } else ret = vrepo_id; out: g_free (vrepo_name); g_free (real_path); return ret; }

/* RPC: remove a group share of the sub-directory `path` of `repo_id`. */
int seafile_unshare_subdir_for_group (const char *repo_id, const char *path, const char *owner, int share_group, GError **error) { if (is_empty_string (repo_id) || !is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo_id parameter"); return -1; } if (is_empty_string (path) || strcmp (path, "/") == 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid path parameter"); return -1; } if (is_empty_string (owner)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid owner parameter"); return -1; } if (share_group < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid share_group parameter"); return -1; } char *real_path; int ret = 0; real_path = format_dir_path (path); ret = seaf_share_manager_unshare_group_subdir (seaf->share_mgr, repo_id, real_path, owner, share_group); if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to unshare subdir for group"); } g_free (real_path); return ret; }

/* RPC: change the permission of an existing group share of a sub-directory. */
int seafile_update_share_subdir_perm_for_group (const char *repo_id, const char *path, const char *owner, int share_group, const char *permission, GError **error) { if (is_empty_string (repo_id) || 
!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo_id parameter"); return -1; } if (is_empty_string (path) || strcmp (path, "/") == 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid path parameter"); return -1; } if (is_empty_string (owner)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid owner parameter"); return -1; } if (share_group < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid share_group parameter"); return -1; } if (!is_permission_valid (permission)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid permission parameter"); return -1; } char *real_path; int ret = 0; real_path = format_dir_path (path); ret = seaf_repo_manager_set_subdir_group_perm_by_path (seaf->repo_mgr, repo_id, owner, share_group, permission, real_path); if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to update share subdir permission for group"); } g_free (real_path); return ret; } char * seafile_get_shared_groups_by_repo(const char *repo_id, GError **error) { SeafRepoManager *mgr = seaf->repo_mgr; GList *group_ids = NULL, *ptr; GString *result; if (!repo_id) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments"); return NULL; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return NULL; } group_ids = seaf_repo_manager_get_groups_by_repo (mgr, repo_id, error); if (!group_ids) { return NULL; } result = g_string_new(""); ptr = group_ids; while (ptr) { g_string_append_printf (result, "%d\n", (int)(long)ptr->data); ptr = ptr->next; } g_list_free (group_ids); return g_string_free (result, FALSE); } char * seafile_get_group_repoids (int group_id, GError **error) { SeafRepoManager *mgr = seaf->repo_mgr; GList *repo_ids = NULL, *ptr; GString *result; repo_ids = seaf_repo_manager_get_group_repoids (mgr, group_id, error); if (!repo_ids) { return NULL; } result = 
g_string_new(""); ptr = repo_ids; while (ptr) { g_string_append_printf (result, "%s\n", (char *)ptr->data); g_free (ptr->data); ptr = ptr->next; } g_list_free (repo_ids); return g_string_free (result, FALSE); } GList * seafile_get_repos_by_group (int group_id, GError **error) { SeafRepoManager *mgr = seaf->repo_mgr; GList *ret = NULL; if (group_id < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid group id."); return NULL; } ret = seaf_repo_manager_get_repos_by_group (mgr, group_id, error); return ret; } GList * seafile_get_group_repos_by_owner (char *user, GError **error) { SeafRepoManager *mgr = seaf->repo_mgr; GList *ret = NULL; if (!user) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "user name can not be NULL"); return NULL; } ret = seaf_repo_manager_get_group_repos_by_owner (mgr, user, error); if (!ret) { return NULL; } return g_list_reverse (ret); } char * seafile_get_group_repo_owner (const char *repo_id, GError **error) { SeafRepoManager *mgr = seaf->repo_mgr; GString *result = g_string_new (""); if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return NULL; } char *share_from = seaf_repo_manager_get_group_repo_owner (mgr, repo_id, error); if (share_from) { g_string_append_printf (result, "%s", share_from); g_free (share_from); } return g_string_free (result, FALSE); } int seafile_remove_repo_group(int group_id, const char *username, GError **error) { if (group_id <= 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Wrong group id argument"); return -1; } return seaf_repo_manager_remove_group_repos (seaf->repo_mgr, group_id, username, error); } /* Inner public repo RPC */ int seafile_set_inner_pub_repo (const char *repo_id, const char *permission, GError **error) { if (!repo_id) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad args"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo 
id"); return -1; } if (seaf_repo_manager_set_inner_pub_repo (seaf->repo_mgr, repo_id, permission) < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Internal error"); return -1; } return 0; } int seafile_unset_inner_pub_repo (const char *repo_id, GError **error) { if (!repo_id) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad args"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } if (seaf_repo_manager_unset_inner_pub_repo (seaf->repo_mgr, repo_id) < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Internal error"); return -1; } return 0; } GList * seafile_list_inner_pub_repos (GError **error) { return seaf_repo_manager_list_inner_pub_repos (seaf->repo_mgr, NULL); } gint64 seafile_count_inner_pub_repos (GError **error) { return seaf_repo_manager_count_inner_pub_repos (seaf->repo_mgr); } GList * seafile_list_inner_pub_repos_by_owner (const char *user, GError **error) { if (!user) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Bad arguments"); return NULL; } return seaf_repo_manager_list_inner_pub_repos_by_owner (seaf->repo_mgr, user); } int seafile_is_inner_pub_repo (const char *repo_id, GError **error) { if (!repo_id) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Bad arguments"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } return seaf_repo_manager_is_inner_pub_repo (seaf->repo_mgr, repo_id); } gint64 seafile_get_file_size (const char *store_id, int version, const char *file_id, GError **error) { gint64 file_size; if (!store_id || !is_uuid_valid(store_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid store id"); return -1; } if (!file_id || !is_object_id_valid (file_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid file id"); return -1; } file_size = seaf_fs_manager_get_file_size (seaf->fs_mgr, store_id, version, file_id); 
if (file_size < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, "failed to read file size"); return -1; } return file_size; } gint64 seafile_get_dir_size (const char *store_id, int version, const char *dir_id, GError **error) { gint64 dir_size; if (!store_id || !is_uuid_valid (store_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid store id"); return -1; } if (!dir_id || !is_object_id_valid (dir_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid dir id"); return -1; } dir_size = seaf_fs_manager_get_fs_size (seaf->fs_mgr, store_id, version, dir_id); if (dir_size < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Failed to caculate dir size"); return -1; } return dir_size; } int seafile_check_passwd (const char *repo_id, const char *magic, GError **error) { if (!repo_id || strlen(repo_id) != 36 || !magic) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments"); return -1; } if (seaf_passwd_manager_check_passwd (seaf->passwd_mgr, repo_id, magic, error) < 0) { return -1; } return 0; } int seafile_set_passwd (const char *repo_id, const char *user, const char *passwd, GError **error) { if (!repo_id || strlen(repo_id) != 36 || !user || !passwd) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments"); return -1; } if (seaf_passwd_manager_set_passwd (seaf->passwd_mgr, repo_id, user, passwd, error) < 0) { return -1; } return 0; } int seafile_unset_passwd (const char *repo_id, const char *user, GError **error) { if (!repo_id || strlen(repo_id) != 36 || !user) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments"); return -1; } if (seaf_passwd_manager_unset_passwd (seaf->passwd_mgr, repo_id, user, error) < 0) { return -1; } return 0; } int seafile_is_passwd_set (const char *repo_id, const char *user, GError **error) { if (!repo_id || strlen(repo_id) != 36 || !user) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments"); return -1; } return 
seaf_passwd_manager_is_passwd_set (seaf->passwd_mgr, repo_id, user); }

/* RPC: return the cached decrypt key for (repo, user) as a SeafileCryptKey object, or NULL when no password is set. */
GObject * seafile_get_decrypt_key (const char *repo_id, const char *user, GError **error) { SeafileCryptKey *ret; if (!repo_id || strlen(repo_id) != 36 || !user) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments"); return NULL; } ret = seaf_passwd_manager_get_decrypt_key (seaf->passwd_mgr, repo_id, user); if (!ret) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Password was not set"); return NULL; } return (GObject *)ret; }

/* RPC: revert the repo head to the state of `commit_id` on behalf of `user_name`. */
int seafile_revert_on_server (const char *repo_id, const char *commit_id, const char *user_name, GError **error) { if (!repo_id || strlen(repo_id) != 36 || !commit_id || strlen(commit_id) != 40 || !user_name) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } if (!is_object_id_valid (commit_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid commit id"); return -1; } return seaf_repo_manager_revert_on_server (seaf->repo_mgr, repo_id, commit_id, user_name, error); }

/* RPC: add the file at `temp_file_path` into `parent_dir` of the repo as
 * `file_name` for `user`. Both path components are normalized to UTF-8 and
 * the parent dir is canonicalized before the manager call. */
int seafile_post_file (const char *repo_id, const char *temp_file_path, const char *parent_dir, const char *file_name, const char *user, GError **error) { char *norm_parent_dir = NULL, *norm_file_name = NULL, *rpath = NULL; int ret = 0; if (!repo_id || !temp_file_path || !parent_dir || !file_name || !user) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } norm_parent_dir = normalize_utf8_path (parent_dir); if (!norm_parent_dir) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Path is in valid UTF8 encoding"); ret = -1; goto out; } norm_file_name = normalize_utf8_path (file_name); if (!norm_file_name) { g_set_error (error, 
SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Path is in valid UTF8 encoding"); ret = -1; goto out; } rpath = format_dir_path (norm_parent_dir); if (seaf_repo_manager_post_file (seaf->repo_mgr, repo_id, temp_file_path, rpath, norm_file_name, user, error) < 0) { ret = -1; } out: g_free (norm_parent_dir); g_free (norm_file_name); g_free (rpath); return ret; }

/* Disabled block-based upload variant, kept for reference. */
/* char * */ /* seafile_post_file_blocks (const char *repo_id, */ /* const char *parent_dir, */ /* const char *file_name, */ /* const char *blockids_json, */ /* const char *paths_json, */ /* const char *user, */ /* gint64 file_size, */ /* int replace_existed, */ /* GError **error) */ /* { */ /* char *norm_parent_dir = NULL, *norm_file_name = NULL, *rpath = NULL; */ /* char *new_id = NULL; */ /* if (!repo_id || !parent_dir || !file_name */ /* || !blockids_json || ! paths_json || !user || file_size < 0) { */ /* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */ /* "Argument should not be null"); */ /* return NULL; */ /* } */ /* if (!is_uuid_valid (repo_id)) { */ /* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); */ /* return NULL; */ /* } */ /* norm_parent_dir = normalize_utf8_path (parent_dir); */ /* if (!norm_parent_dir) { */ /* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */ /* "Path is in valid UTF8 encoding"); */ /* goto out; */ /* } */ /* norm_file_name = normalize_utf8_path (file_name); */ /* if (!norm_file_name) { */ /* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */ /* "Path is in valid UTF8 encoding"); */ /* goto out; */ /* } */ /* rpath = format_dir_path (norm_parent_dir); */ /* seaf_repo_manager_post_file_blocks (seaf->repo_mgr, */ /* repo_id, */ /* rpath, */ /* norm_file_name, */ /* blockids_json, */ /* paths_json, */ /* user, */ /* file_size, */ /* replace_existed, */ /* &new_id, */ /* error); */ /* out: */ /* g_free (norm_parent_dir); */ /* g_free (norm_file_name); */ /* g_free (rpath); */ /* return new_id; */ /* } */

/* RPC: batch upload of multiple files into one parent dir; returns a JSON description of the created entries. */
char * seafile_post_multi_files (const 
char *repo_id,
                          const char *parent_dir,
                          const char *filenames_json,
                          const char *paths_json,
                          const char *user,
                          int replace_existed,
                          GError **error)
{
    char *norm_parent_dir = NULL, *rpath = NULL;
    /* JSON list of the new file ids, owned by the caller. */
    char *ret_json = NULL;

    if (!repo_id || !filenames_json || !parent_dir || !paths_json || !user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Argument should not be null");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    norm_parent_dir = normalize_utf8_path (parent_dir);
    if (!norm_parent_dir) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        goto out;
    }

    rpath = format_dir_path (norm_parent_dir);

    /* On failure ret_json stays NULL and *error carries the reason. */
    seaf_repo_manager_post_multi_files (seaf->repo_mgr,
                                        repo_id,
                                        rpath,
                                        filenames_json,
                                        paths_json,
                                        user,
                                        replace_existed,
                                        0,
                                        &ret_json,
                                        NULL,
                                        error);

out:
    g_free (norm_parent_dir);
    g_free (rpath);
    return ret_json;
}

/* Replace an existing file's content with a temp file; returns the new
 * file id (caller frees) or NULL with *error set. */
char *
seafile_put_file (const char *repo_id, const char *temp_file_path,
                  const char *parent_dir, const char *file_name,
                  const char *user, const char *head_id,
                  GError **error)
{
    char *norm_parent_dir = NULL, *norm_file_name = NULL, *rpath = NULL;
    char *new_file_id = NULL;

    if (!repo_id || !temp_file_path || !parent_dir || !file_name || !user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Argument should not be null");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    norm_parent_dir = normalize_utf8_path (parent_dir);
    if (!norm_parent_dir) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        goto out;
    }

    norm_file_name = normalize_utf8_path (file_name);
    if (!norm_file_name) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        goto out;
    }

    rpath = format_dir_path (norm_parent_dir);

    seaf_repo_manager_put_file (seaf->repo_mgr, repo_id, temp_file_path, rpath,
norm_file_name, user, head_id, 0, &new_file_id, error); out: g_free (norm_parent_dir); g_free (norm_file_name); g_free (rpath); return new_file_id; } /* char * */ /* seafile_put_file_blocks (const char *repo_id, const char *parent_dir, */ /* const char *file_name, const char *blockids_json, */ /* const char *paths_json, const char *user, */ /* const char *head_id, gint64 file_size, GError **error) */ /* { */ /* char *norm_parent_dir = NULL, *norm_file_name = NULL, *rpath = NULL; */ /* char *new_file_id = NULL; */ /* if (!repo_id || !parent_dir || !file_name */ /* || !blockids_json || ! paths_json || !user) { */ /* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */ /* "Argument should not be null"); */ /* return NULL; */ /* } */ /* if (!is_uuid_valid (repo_id)) { */ /* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); */ /* return NULL; */ /* } */ /* norm_parent_dir = normalize_utf8_path (parent_dir); */ /* if (!norm_parent_dir) { */ /* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */ /* "Path is in valid UTF8 encoding"); */ /* goto out; */ /* } */ /* norm_file_name = normalize_utf8_path (file_name); */ /* if (!norm_file_name) { */ /* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */ /* "Path is in valid UTF8 encoding"); */ /* goto out; */ /* } */ /* rpath = format_dir_path (norm_parent_dir); */ /* seaf_repo_manager_put_file_blocks (seaf->repo_mgr, repo_id, */ /* rpath, norm_file_name, */ /* blockids_json, paths_json, */ /* user, head_id, file_size, */ /* &new_file_id, error); */ /* out: */ /* g_free (norm_parent_dir); */ /* g_free (norm_file_name); */ /* g_free (rpath); */ /* return new_file_id; */ /* } */ int seafile_post_dir (const char *repo_id, const char *parent_dir, const char *new_dir_name, const char *user, GError **error) { char *norm_parent_dir = NULL, *norm_dir_name = NULL, *rpath = NULL; int ret = 0; if (!repo_id || !parent_dir || !new_dir_name || !user) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, 
"Argument should not be null"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } norm_parent_dir = normalize_utf8_path (parent_dir); if (!norm_parent_dir) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Path is in valid UTF8 encoding"); ret = -1; goto out; } norm_dir_name = normalize_utf8_path (new_dir_name); if (!norm_dir_name) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Path is in valid UTF8 encoding"); ret = -1; goto out; } rpath = format_dir_path (norm_parent_dir); if (seaf_repo_manager_post_dir (seaf->repo_mgr, repo_id, rpath, norm_dir_name, user, error) < 0) { ret = -1; } out: g_free (norm_parent_dir); g_free (norm_dir_name); g_free (rpath); return ret; } int seafile_post_empty_file (const char *repo_id, const char *parent_dir, const char *new_file_name, const char *user, GError **error) { char *norm_parent_dir = NULL, *norm_file_name = NULL, *rpath = NULL; int ret = 0; if (!repo_id || !parent_dir || !new_file_name || !user) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } norm_parent_dir = normalize_utf8_path (parent_dir); if (!norm_parent_dir) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Path is in valid UTF8 encoding"); ret = -1; goto out; } norm_file_name = normalize_utf8_path (new_file_name); if (!norm_file_name) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Path is in valid UTF8 encoding"); ret = -1; goto out; } rpath = format_dir_path (norm_parent_dir); if (seaf_repo_manager_post_empty_file (seaf->repo_mgr, repo_id, rpath, norm_file_name, user, error) < 0) { ret = -1; } out: g_free (norm_parent_dir); g_free (norm_file_name); g_free (rpath); return ret; } int seafile_del_file (const char *repo_id, const char *parent_dir, const char *file_name, const char *user, 
GError **error) { char *norm_parent_dir = NULL, *norm_file_name = NULL, *rpath = NULL; int ret = 0; if (!repo_id || !parent_dir || !file_name || !user) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } norm_parent_dir = normalize_utf8_path (parent_dir); if (!norm_parent_dir) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Path is in valid UTF8 encoding"); ret = -1; goto out; } norm_file_name = normalize_utf8_path (file_name); if (!norm_file_name) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Path is in valid UTF8 encoding"); ret = -1; goto out; } rpath = format_dir_path (norm_parent_dir); if (seaf_repo_manager_del_file (seaf->repo_mgr, repo_id, rpath, norm_file_name, user, error) < 0) { ret = -1; } out: g_free (norm_parent_dir); g_free (norm_file_name); g_free (rpath); return ret; } int seafile_batch_del_files (const char *repo_id, const char *filepaths, const char *user, GError **error) { char *norm_file_list = NULL, *rpath = NULL; int ret = 0; if (!repo_id || !filepaths || !user) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } norm_file_list = normalize_utf8_path (filepaths); if (!norm_file_list) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Path is in valid UTF8 encoding"); ret = -1; goto out; } if (seaf_repo_manager_batch_del_files (seaf->repo_mgr, repo_id, norm_file_list, user, error) < 0) { ret = -1; } out: g_free (norm_file_list); return ret; } GObject * seafile_copy_file (const char *src_repo_id, const char *src_dir, const char *src_filename, const char *dst_repo_id, const char *dst_dir, const char *dst_filename, const char *user, int need_progress, int synchronous, GError **error) { char *norm_src_dir = NULL, 
*norm_src_filename = NULL; char *norm_dst_dir = NULL, *norm_dst_filename = NULL; char *rsrc_dir = NULL, *rdst_dir = NULL; GObject *ret = NULL; if (!src_repo_id || !src_dir || !src_filename || !dst_repo_id || !dst_dir || !dst_filename || !user) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return NULL; } if (!is_uuid_valid (src_repo_id) || !is_uuid_valid(dst_repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return NULL; } norm_src_dir = normalize_utf8_path (src_dir); if (!norm_src_dir) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Path is in valid UTF8 encoding"); goto out; } norm_src_filename = normalize_utf8_path (src_filename); if (!norm_src_filename) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Path is in valid UTF8 encoding"); goto out; } norm_dst_dir = normalize_utf8_path (dst_dir); if (!norm_dst_dir) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Path is in valid UTF8 encoding"); goto out; } norm_dst_filename = normalize_utf8_path (dst_filename); if (!norm_dst_filename) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Path is in valid UTF8 encoding"); goto out; } rsrc_dir = format_dir_path (norm_src_dir); rdst_dir = format_dir_path (norm_dst_dir); ret = (GObject *)seaf_repo_manager_copy_multiple_files (seaf->repo_mgr, src_repo_id, rsrc_dir, norm_src_filename, dst_repo_id, rdst_dir, norm_dst_filename, user, need_progress, synchronous, error); out: g_free (norm_src_dir); g_free (norm_src_filename); g_free (norm_dst_dir); g_free (norm_dst_filename); g_free (rsrc_dir); g_free (rdst_dir); return ret; } GObject * seafile_move_file (const char *src_repo_id, const char *src_dir, const char *src_filename, const char *dst_repo_id, const char *dst_dir, const char *dst_filename, int replace, const char *user, int need_progress, int synchronous, GError **error) { char *norm_src_dir = NULL, *norm_src_filename = NULL; char *norm_dst_dir = NULL, *norm_dst_filename 
= NULL; char *rsrc_dir = NULL, *rdst_dir = NULL; GObject *ret = NULL; if (!src_repo_id || !src_dir || !src_filename || !dst_repo_id || !dst_dir || !dst_filename || !user) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return NULL; } if (!is_uuid_valid (src_repo_id) || !is_uuid_valid(dst_repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return NULL; } norm_src_dir = normalize_utf8_path (src_dir); if (!norm_src_dir) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Path is in valid UTF8 encoding"); goto out; } norm_src_filename = normalize_utf8_path (src_filename); if (!norm_src_filename) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Path is in valid UTF8 encoding"); goto out; } norm_dst_dir = normalize_utf8_path (dst_dir); if (!norm_dst_dir) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Path is in valid UTF8 encoding"); goto out; } norm_dst_filename = normalize_utf8_path (dst_filename); if (!norm_dst_filename) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Path is in valid UTF8 encoding"); goto out; } rsrc_dir = format_dir_path (norm_src_dir); rdst_dir = format_dir_path (norm_dst_dir); ret = (GObject *)seaf_repo_manager_move_multiple_files (seaf->repo_mgr, src_repo_id, rsrc_dir, norm_src_filename, dst_repo_id, rdst_dir, norm_dst_filename, replace, user, need_progress, synchronous, error); out: g_free (norm_src_dir); g_free (norm_src_filename); g_free (norm_dst_dir); g_free (norm_dst_filename); g_free (rsrc_dir); g_free (rdst_dir); return ret; } GObject * seafile_get_copy_task (const char *task_id, GError **error) { return (GObject *)seaf_copy_manager_get_task (seaf->copy_mgr, task_id); } int seafile_cancel_copy_task (const char *task_id, GError **error) { return seaf_copy_manager_cancel_task (seaf->copy_mgr, task_id); } int seafile_rename_file (const char *repo_id, const char *parent_dir, const char *oldname, const char *newname, const char *user, GError 
**error) { char *norm_parent_dir = NULL, *norm_oldname = NULL, *norm_newname = NULL; char *rpath = NULL; int ret = 0; if (!repo_id || !parent_dir || !oldname || !newname || !user) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } norm_parent_dir = normalize_utf8_path (parent_dir); if (!norm_parent_dir) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Path is in valid UTF8 encoding"); ret = -1; goto out; } norm_oldname = normalize_utf8_path (oldname); if (!norm_oldname) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Path is in valid UTF8 encoding"); ret = -1; goto out; } norm_newname = normalize_utf8_path (newname); if (!norm_newname) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Path is in valid UTF8 encoding"); ret = -1; goto out; } rpath = format_dir_path (norm_parent_dir); if (seaf_repo_manager_rename_file (seaf->repo_mgr, repo_id, rpath, norm_oldname, norm_newname, user, error) < 0) { ret = -1; } out: g_free (norm_parent_dir); g_free (norm_oldname); g_free (norm_newname); g_free (rpath); return ret; } int seafile_is_valid_filename (const char *repo_id, const char *filename, GError **error) { if (!repo_id || !filename) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } int ret = seaf_repo_manager_is_valid_filename (seaf->repo_mgr, repo_id, filename, error); return ret; } char * seafile_create_repo (const char *repo_name, const char *repo_desc, const char *owner_email, const char *passwd, int enc_version, const char *pwd_hash_algo, const char *pwd_hash_params, GError **error) { if (!repo_name || !repo_desc || !owner_email) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return NULL; } char *repo_id; repo_id = seaf_repo_manager_create_new_repo (seaf->repo_mgr, repo_name, repo_desc, owner_email, passwd, 
enc_version, pwd_hash_algo, pwd_hash_params, error); return repo_id; } char * seafile_create_enc_repo (const char *repo_id, const char *repo_name, const char *repo_desc, const char *owner_email, const char *magic, const char *random_key, const char *salt, int enc_version, const char *pwd_hash, const char *pwd_hash_algo, const char *pwd_hash_params, GError **error) { if (!repo_id || !repo_name || !repo_desc || !owner_email) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return NULL; } char *ret; ret = seaf_repo_manager_create_enc_repo (seaf->repo_mgr, repo_id, repo_name, repo_desc, owner_email, magic, random_key, salt, enc_version, pwd_hash, pwd_hash_algo, pwd_hash_params, error); return ret; } int seafile_set_user_quota (const char *user, gint64 quota, GError **error) { if (!user) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments"); return -1; } return seaf_quota_manager_set_user_quota (seaf->quota_mgr, user, quota); } gint64 seafile_get_user_quota (const char *user, GError **error) { if (!user) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments"); return -1; } return seaf_quota_manager_get_user_quota (seaf->quota_mgr, user); } int seafile_check_quota (const char *repo_id, gint64 delta, GError **error) { int rc; if (!repo_id) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments"); return -1; } rc = seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr, repo_id, delta); if (rc == 1) return -1; return rc; } GList * seafile_list_user_quota_usage (GError **error) { return seaf_repo_quota_manager_list_user_quota_usage (seaf->quota_mgr); } static char * get_obj_id_by_path (const char *repo_id, const char *path, gboolean want_dir, GError **error) { SeafRepo *repo = NULL; SeafCommit *commit = NULL; char *obj_id = NULL; if (!repo_id || !path) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments"); return NULL; } if (!is_uuid_valid (repo_id)) { g_set_error 
            (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, "Get repo error");
        goto out;
    }

    commit = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                             repo->id, repo->version,
                                             repo->head->commit_id);
    if (!commit) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, "Get commit error");
        goto out;
    }

    guint32 mode = 0;
    obj_id = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,
                                             repo->store_id, repo->version,
                                             commit->root_id,
                                             path, &mode, error);

out:
    if (repo)
        seaf_repo_unref (repo);
    if (commit)
        seaf_commit_unref (commit);

    if (obj_id) {
        /* check if the mode matches */
        if ((want_dir && !S_ISDIR(mode)) || ((!want_dir) && S_ISDIR(mode))) {
            g_free (obj_id);
            return NULL;
        }
    }

    return obj_id;
}

/* Resolve `path` to a file id; NULL if the path is a directory or missing.
 * The caller frees the returned string. */
char *seafile_get_file_id_by_path (const char *repo_id,
                                   const char *path,
                                   GError **error)
{
    if (!repo_id || !path) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments");
        return NULL;
    }

    char *rpath = format_dir_path (path);

    char *ret = get_obj_id_by_path (repo_id, rpath, FALSE, error);
    g_free (rpath);
    /* filter_error() presumably downgrades expected lookup errors —
     * TODO(review): confirm its semantics against its definition. */
    filter_error (error);
    return ret;
}

/* Resolve `path` to a directory id; NULL if the path is a file or missing.
 * The caller frees the returned string. */
char *seafile_get_dir_id_by_path (const char *repo_id,
                                  const char *path,
                                  GError **error)
{
    if (!repo_id || !path) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments");
        return NULL;
    }

    char *rpath = format_dir_path (path);

    char *ret = get_obj_id_by_path (repo_id, rpath, TRUE, error);
    g_free (rpath);
    filter_error (error);
    return ret;
}

/* Build a SeafileDirent GObject for the entry at `path` in the repo head.
 * The repo root "/" itself is rejected. Continued on the next line. */
GObject *
seafile_get_dirent_by_path (const char *repo_id, const char *path,
                            GError **error)
{
    if (!repo_id || !path) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "invalid repo id");
        return NULL;
    }

    char *rpath = format_dir_path (path);
    if (strcmp (rpath, "/") == 0) {
        g_set_error (error, SEAFILE_DOMAIN,
                     SEAF_ERR_BAD_ARGS, "invalid path");
        g_free (rpath);
        return NULL;
    }

    SeafRepo *repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, "Get repo error");
        return NULL;
    }

    SeafCommit *commit = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                                         repo->id, repo->version,
                                                         repo->head->commit_id);
    if (!commit) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, "Get commit error");
        seaf_repo_unref (repo);
        return NULL;
    }

    SeafDirent *dirent = seaf_fs_manager_get_dirent_by_path (seaf->fs_mgr,
                                                             repo->store_id, repo->version,
                                                             commit->root_id, rpath,
                                                             error);
    g_free (rpath);
    if (!dirent) {
        filter_error (error);
        seaf_repo_unref (repo);
        seaf_commit_unref (commit);
        return NULL;
    }

    /* Copy the dirent fields into a GObject for the RPC layer. */
    GObject *obj = g_object_new (SEAFILE_TYPE_DIRENT,
                                 "obj_id", dirent->id,
                                 "obj_name", dirent->name,
                                 "mode", dirent->mode,
                                 "version", dirent->version,
                                 "mtime", dirent->mtime,
                                 "size", dirent->size,
                                 "modifier", dirent->modifier,
                                 NULL);

    seaf_repo_unref (repo);
    seaf_commit_unref (commit);
    seaf_dirent_free (dirent);

    return obj;
}

/* Return the block ids of a file, one per line, honoring offset/limit
 * pagination. Caller frees the returned string. Note: this function uses
 * SEAF_ERR_BAD_DIR_ID even for repo/file-id errors. Continued below. */
char *
seafile_list_file_blocks (const char *repo_id,
                          const char *file_id,
                          int offset, int limit,
                          GError **error)
{
    SeafRepo *repo;
    Seafile *file;
    GString *buf = g_string_new ("");
    int index = 0;

    if (!repo_id || !is_uuid_valid(repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_DIR_ID, "Bad repo id");
        return NULL;
    }

    if (!file_id || !is_object_id_valid(file_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_DIR_ID, "Bad file id");
        return NULL;
    }

    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad repo id");
        return NULL;
    }

    file = seaf_fs_manager_get_seafile (seaf->fs_mgr,
                                        repo->store_id, repo->version, file_id);
    if (!file) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_DIR_ID, "Bad file id");
        seaf_repo_unref (repo);
        return NULL;
    }

    if (offset < 0)
        offset = 0;

    for (index = 0; index < file->n_blocks; index++) {
        if (index <
            offset) {
            continue;
        }
        if (limit > 0) {
            if (index >= offset + limit)
                break;
        }
        g_string_append_printf (buf, "%s\n", file->blk_sha1s[index]);
    }

    seafile_unref (file);
    seaf_repo_unref (repo);

    /* FALSE: hand ownership of the character buffer to the caller. */
    return g_string_free (buf, FALSE);
}

/*
 * Directories are always before files. Otherwise compare the names.
 */
static gint
comp_dirent_func (gconstpointer a, gconstpointer b)
{
    const SeafDirent *dent_a = a, *dent_b = b;

    if (S_ISDIR(dent_a->mode) && S_ISREG(dent_b->mode))
        return -1;

    if (S_ISREG(dent_a->mode) && S_ISDIR(dent_b->mode))
        return 1;

    return strcasecmp (dent_a->name, dent_b->name);
}

/* List the entries of a directory object (sorted dirs-first, then by
 * case-insensitive name) as SeafileDirent GObjects, honoring
 * offset/limit pagination. Continued on the next line. */
GList *
seafile_list_dir (const char *repo_id,
                  const char *dir_id, int offset, int limit, GError **error)
{
    SeafRepo *repo;
    SeafDir *dir;
    SeafDirent *dent;
    SeafileDirent *d;
    GList *res = NULL;
    GList *p;

    if (!repo_id || !is_uuid_valid(repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_DIR_ID, "Bad repo id");
        return NULL;
    }

    if (!dir_id || !is_object_id_valid (dir_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_DIR_ID, "Bad dir id");
        return NULL;
    }

    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad repo id");
        return NULL;
    }

    dir = seaf_fs_manager_get_seafdir (seaf->fs_mgr,
                                       repo->store_id, repo->version, dir_id);
    if (!dir) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_DIR_ID, "Bad dir id");
        seaf_repo_unref (repo);
        return NULL;
    }

    dir->entries = g_list_sort (dir->entries, comp_dirent_func);

    if (offset < 0) {
        offset = 0;
    }

    int index = 0;
    for (p = dir->entries; p != NULL; p = p->next, index++) {
        if (index < offset) {
            continue;
        }

        if (limit > 0) {
            if (index >= offset + limit)
                break;
        }

        dent = p->data;

        /* Entries with corrupt object ids are silently skipped. */
        if (!is_object_id_valid (dent->id))
            continue;

        d = g_object_new (SEAFILE_TYPE_DIRENT,
                          "obj_id", dent->id,
                          "obj_name", dent->name,
                          "mode", dent->mode,
                          "version", dent->version,
                          "mtime", dent->mtime,
                          "size", dent->size,
                          "permission", "",
                          NULL);
        res = g_list_prepend (res, d);
    }

    seaf_dir_free (dir);
    seaf_repo_unref (repo);
    /* Entries were prepended; reverse once to restore sorted order. */
    res = g_list_reverse (res);
    return res;
}

/* List the change history of a single file starting from `commit_id`.
 * Returns a list of commits, or NULL with *error set. */
GList *
seafile_list_file_revisions (const char *repo_id,
                             const char *commit_id,
                             const char *path,
                             int limit,
                             GError **error)
{
    if (!repo_id || !path) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    char *rpath = format_dir_path (path);

    GList *commit_list;
    commit_list = seaf_repo_manager_list_file_revisions (seaf->repo_mgr,
                                                         repo_id, commit_id, rpath,
                                                         limit, FALSE, FALSE, error);
    g_free (rpath);

    return commit_list;
}

/* Compute last-modified info for files directly under parent_dir. */
GList *
seafile_calc_files_last_modified (const char *repo_id,
                                  const char *parent_dir,
                                  int limit,
                                  GError **error)
{
    if (!repo_id || !parent_dir) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    char *rpath = format_dir_path (parent_dir);

    GList *ret = seaf_repo_manager_calc_files_last_modified (seaf->repo_mgr,
                                                             repo_id, rpath,
                                                             limit, error);
    g_free (rpath);

    return ret;
}

/* Restore a file to its state in `commit_id`; 0 on success, -1 with
 * *error set. */
int
seafile_revert_file (const char *repo_id,
                     const char *commit_id,
                     const char *path,
                     const char *user,
                     GError **error)
{
    if (!repo_id || !commit_id || !path || !user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments");
        return -1;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    if (!is_object_id_valid (commit_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid commit id");
        return -1;
    }

    char *rpath = format_dir_path (path);

    int ret = seaf_repo_manager_revert_file (seaf->repo_mgr,
                                             repo_id, commit_id, rpath,
                                             user, error);
    g_free (rpath);

    return ret;
}

/* Restore a directory to its state in `commit_id`; 0 on success, -1 with
 * *error set. Continued on the next line. */
int
seafile_revert_dir (const char *repo_id,
                    const char *commit_id,
                    const char *path,
                    const char *user,
                    GError **error)
{
    if (!repo_id ||
        !commit_id || !path || !user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments");
        return -1;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    if (!is_object_id_valid (commit_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid commit id");
        return -1;
    }

    char *rpath = format_dir_path (path);

    int ret = seaf_repo_manager_revert_dir (seaf->repo_mgr,
                                            repo_id, commit_id, rpath,
                                            user, error);
    g_free (rpath);

    return ret;
}

/* Given a JSON array of block ids, return a JSON array (as a newly
 * allocated string) containing the subset that does NOT exist in the
 * repo's block store. NULL with *error set on bad JSON or unknown repo.
 * NOTE(review): repo_id is not validated with is_uuid_valid here, unlike
 * the other entry points — confirm whether that is intentional. */
char *
seafile_check_repo_blocks_missing (const char *repo_id,
                                   const char *blockids_json,
                                   GError **error)
{
    json_t *array, *value, *ret_json;
    json_error_t err;
    size_t index;
    char *json_data, *ret;
    SeafRepo *repo = NULL;

    array = json_loadb (blockids_json, strlen(blockids_json), 0, &err);
    if (!array) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments");
        return NULL;
    }

    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        seaf_warning ("Failed to get repo %.8s.\n", repo_id);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Repo not found");
        json_decref (array);
        return NULL;
    }

    ret_json = json_array();
    size_t n = json_array_size (array);
    for (index = 0; index < n; index++) {
        value = json_array_get (array, index);
        const char *blockid = json_string_value (value);
        /* Non-string elements are silently skipped. */
        if (!blockid)
            continue;
        if (!seaf_block_manager_block_exists(seaf->block_mgr, repo_id,
                                             repo->version, blockid)) {
            /* json_string() result ownership passes to ret_json. */
            json_array_append_new (ret_json, json_string(blockid));
        }
    }

    json_data = json_dumps (ret_json, 0);
    /* Duplicate into glib-allocated memory so callers can g_free() it. */
    ret = g_strdup (json_data);

    free (json_data);
    json_decref (ret_json);
    json_decref (array);
    seaf_repo_unref (repo);

    return ret;
}

/* List deleted entries visible in the trash, optionally scoped to `path`
 * and bounded by `show_days`/`limit`. Continued on the next line. */
GList *
seafile_get_deleted (const char *repo_id, int show_days,
                     const char *path, const char *scan_stat,
                     int limit, GError **error)
{
    if (!repo_id) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN,
                     SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    /* path is optional; NULL means scan the whole repo trash. */
    char *rpath = NULL;
    if (path)
        rpath = format_dir_path (path);

    GList *ret = seaf_repo_manager_get_deleted_entries (seaf->repo_mgr,
                                                        repo_id, show_days,
                                                        rpath, scan_stat,
                                                        limit, error);
    g_free (rpath);

    return ret;
}

/* Create a sync token granting `email` access to the repo; caller frees. */
char *
seafile_generate_repo_token (const char *repo_id,
                             const char *email,
                             GError **error)
{
    char *token;

    if (!repo_id || !email) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Arguments should not be empty");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    token = seaf_repo_manager_generate_repo_token (seaf->repo_mgr,
                                                   repo_id, email, error);

    return token;
}

/* Revoke a single sync token for the repo; 0 on success, -1 with *error. */
int
seafile_delete_repo_token (const char *repo_id,
                           const char *token,
                           const char *user,
                           GError **error)
{
    if (!repo_id || !token || !user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Arguments should not be empty");
        return -1;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    return seaf_repo_manager_delete_token (seaf->repo_mgr,
                                           repo_id, token, user, error);
}

/* List all sync tokens issued for the repo. */
GList *
seafile_list_repo_tokens (const char *repo_id,
                          GError **error)
{
    GList *ret_list;

    if (!repo_id) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Arguments should not be empty");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    ret_list = seaf_repo_manager_list_repo_tokens (seaf->repo_mgr,
                                                   repo_id, error);

    return ret_list;
}

/* List all sync tokens issued to a user across repos. */
GList *
seafile_list_repo_tokens_by_email (const char *email,
                                   GError **error)
{
    GList *ret_list;

    if (!email) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Arguments should not be empty");
        return NULL;
    }

    ret_list = seaf_repo_manager_list_repo_tokens_by_email (seaf->repo_mgr,
                                                            email, error);

    return ret_list;
}

/* Revoke all tokens a user issued from one client (peer). Continued below. */
int
seafile_delete_repo_tokens_by_peer_id(const char *email, const
char *peer_id, GError **error) { if (!email || !peer_id) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty"); return -1; } /* check the peer id */ if (strlen(peer_id) != 40) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "invalid peer id"); return -1; } const char *c = peer_id; while (*c) { char v = *c; if ((v >= '0' && v <= '9') || (v >= 'a' && v <= 'z')) { c++; continue; } else { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "invalid peer id"); return -1; } } GList *tokens = NULL; if (seaf_repo_manager_delete_repo_tokens_by_peer_id (seaf->repo_mgr, email, peer_id, &tokens, error) < 0) { g_list_free_full (tokens, (GDestroyNotify)g_free); return -1; } #ifdef HAVE_EVHTP seaf_http_server_invalidate_tokens(seaf->http_server, tokens); #endif g_list_free_full (tokens, (GDestroyNotify)g_free); return 0; } int seafile_delete_repo_tokens_by_email (const char *email, GError **error) { if (!email) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty"); return -1; } return seaf_repo_manager_delete_repo_tokens_by_email (seaf->repo_mgr, email, error); } char * seafile_check_permission (const char *repo_id, const char *user, GError **error) { if (!repo_id || !user) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty"); return NULL; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return NULL; } if (strlen(user) == 0) return NULL; return seaf_repo_manager_check_permission (seaf->repo_mgr, repo_id, user, error); } char * seafile_check_permission_by_path (const char *repo_id, const char *path, const char *user, GError **error) { return seafile_check_permission (repo_id, user, error); } GList * seafile_list_dir_with_perm (const char *repo_id, const char *path, const char *dir_id, const char *user, int offset, int limit, GError **error) { if (!repo_id || !is_uuid_valid (repo_id)) { g_set_error 
(error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return NULL; } if (!path) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid path"); return NULL; } if (!dir_id || !is_object_id_valid (dir_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid dir id"); return NULL; } if (!user) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid user"); return NULL; } char *rpath = format_dir_path (path); GList *ret = seaf_repo_manager_list_dir_with_perm (seaf->repo_mgr, repo_id, rpath, dir_id, user, offset, limit, error); g_free (rpath); return ret; } int seafile_set_share_permission (const char *repo_id, const char *from_email, const char *to_email, const char *permission, GError **error) { if (!repo_id || !from_email || !to_email || !permission) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo_id parameter"); return -1; } if (!is_permission_valid (permission)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid permission parameter"); return -1; } return seaf_share_manager_set_permission (seaf->share_mgr, repo_id, from_email, to_email, permission); } int seafile_set_group_repo_permission (int group_id, const char *repo_id, const char *permission, GError **error) { if (!repo_id || !permission) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } if (!is_permission_valid (permission)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid permission parameter"); return -1; } return seaf_repo_manager_set_group_repo_perm (seaf->repo_mgr, repo_id, group_id, permission, error); } char * seafile_get_file_id_by_commit_and_path(const char *repo_id, const char *commit_id, const char 
*path, GError **error) { SeafRepo *repo; SeafCommit *commit; char *file_id; guint32 mode; if (!repo_id || !is_uuid_valid(repo_id) || !commit_id || !path) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty"); return NULL; } repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad repo id"); return NULL; } commit = seaf_commit_manager_get_commit(seaf->commit_mgr, repo_id, repo->version, commit_id); if (!commit) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "bad commit id"); seaf_repo_unref (repo); return NULL; } char *rpath = format_dir_path (path); file_id = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr, repo->store_id, repo->version, commit->root_id, rpath, &mode, error); if (file_id && S_ISDIR(mode)) { g_free (file_id); file_id = NULL; } g_free (rpath); filter_error (error); seaf_commit_unref(commit); seaf_repo_unref (repo); return file_id; } /* Virtual repo related */ char * seafile_create_virtual_repo (const char *origin_repo_id, const char *path, const char *repo_name, const char *repo_desc, const char *owner, const char *passwd, GError **error) { if (!origin_repo_id || !path ||!repo_name || !repo_desc || !owner) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return NULL; } if (!is_uuid_valid (origin_repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return NULL; } char *repo_id; char *rpath = format_dir_path (path); repo_id = seaf_repo_manager_create_virtual_repo (seaf->repo_mgr, origin_repo_id, rpath, repo_name, repo_desc, owner, passwd, error); g_free (rpath); return repo_id; } GList * seafile_get_virtual_repos_by_owner (const char *owner, GError **error) { GList *repos, *ret = NULL, *ptr; SeafRepo *r, *o; SeafileRepo *repo; char *orig_repo_id; gboolean is_original_owner; if (!owner) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return NULL; } 
repos = seaf_repo_manager_get_virtual_repos_by_owner (seaf->repo_mgr, owner, error); for (ptr = repos; ptr != NULL; ptr = ptr->next) { r = ptr->data; orig_repo_id = r->virtual_info->origin_repo_id; o = seaf_repo_manager_get_repo (seaf->repo_mgr, orig_repo_id); if (!o) { seaf_warning ("Failed to get origin repo %.10s.\n", orig_repo_id); seaf_repo_unref (r); continue; } char *orig_owner = seaf_repo_manager_get_repo_owner (seaf->repo_mgr, orig_repo_id); if (g_strcmp0 (orig_owner, owner) == 0) is_original_owner = TRUE; else is_original_owner = FALSE; g_free (orig_owner); char *perm = seaf_repo_manager_check_permission (seaf->repo_mgr, r->id, owner, NULL); repo = (SeafileRepo *)convert_repo (r); if (repo) { g_object_set (repo, "is_original_owner", is_original_owner, "origin_repo_name", o->name, "virtual_perm", perm, NULL); ret = g_list_prepend (ret, repo); } seaf_repo_unref (r); seaf_repo_unref (o); g_free (perm); } g_list_free (repos); return g_list_reverse (ret); } GObject * seafile_get_virtual_repo (const char *origin_repo, const char *path, const char *owner, GError **error) { char *repo_id; GObject *repo_obj; char *rpath = format_dir_path (path); repo_id = seaf_repo_manager_get_virtual_repo_id (seaf->repo_mgr, origin_repo, rpath, owner); g_free (rpath); if (!repo_id) return NULL; repo_obj = seafile_get_repo (repo_id, error); g_free (repo_id); return repo_obj; } /* System default library */ char * seafile_get_system_default_repo_id (GError **error) { return get_system_default_repo_id(seaf); } static int update_valid_since_time (SeafRepo *repo, gint64 new_time) { int ret = 0; gint64 old_time = seaf_repo_manager_get_repo_valid_since (repo->manager, repo->id); if (new_time > 0) { if (new_time > old_time) ret = seaf_repo_manager_set_repo_valid_since (repo->manager, repo->id, new_time); } else if (new_time == 0) { /* Only the head commit is valid after GC if no history is kept. 
*/ SeafCommit *head = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id, repo->version, repo->head->commit_id); if (head && (old_time < 0 || head->ctime > (guint64)old_time)) ret = seaf_repo_manager_set_repo_valid_since (repo->manager, repo->id, head->ctime); seaf_commit_unref (head); } return ret; } /* Clean up a repo's history. * It just set valid-since time but not actually delete the data. */ int seafile_clean_up_repo_history (const char *repo_id, int keep_days, GError **error) { SeafRepo *repo; int ret; if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid arguments"); return -1; } repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { seaf_warning ("Cannot find repo %s.\n", repo_id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid arguments"); return -1; } gint64 truncate_time, now; if (keep_days > 0) { now = (gint64)time(NULL); truncate_time = now - keep_days * 24 * 3600; } else truncate_time = 0; ret = update_valid_since_time (repo, truncate_time); if (ret < 0) { seaf_warning ("Failed to update valid since time for repo %.8s.\n", repo->id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Database error"); } seaf_repo_unref (repo); return ret; } GList * seafile_get_shared_users_for_subdir (const char *repo_id, const char *path, const char *from_user, GError **error) { if (!repo_id || !path || !from_user) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return NULL; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo_id"); return NULL; } char *rpath = format_dir_path (path); GList *ret = seaf_repo_manager_get_shared_users_for_subdir (seaf->repo_mgr, repo_id, rpath, from_user, error); g_free (rpath); return ret; } GList * seafile_get_shared_groups_for_subdir (const char *repo_id, const char *path, const char *from_user, GError **error) { if (!repo_id || !path || !from_user) { 
g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return NULL; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo_id"); return NULL; } char *rpath = format_dir_path (path); GList *ret = seaf_repo_manager_get_shared_groups_for_subdir (seaf->repo_mgr, repo_id, rpath, from_user, error); g_free (rpath); return ret; } gint64 seafile_get_total_file_number (GError **error) { return seaf_get_total_file_number (error); } gint64 seafile_get_total_storage (GError **error) { return seaf_get_total_storage (error); } GObject * seafile_get_file_count_info_by_path (const char *repo_id, const char *path, GError **error) { if (!repo_id || !path) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return NULL; } GObject *ret = NULL; SeafRepo *repo = NULL; repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { seaf_warning ("Failed to get repo %.10s\n", repo_id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Library not exists"); return NULL; } ret = seaf_fs_manager_get_file_count_info_by_path (seaf->fs_mgr, repo->store_id, repo->version, repo->root_id, path, error); seaf_repo_unref (repo); return ret; } char * seafile_get_trash_repo_owner (const char *repo_id, GError **error) { if (!repo_id) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return NULL; } return seaf_get_trash_repo_owner (repo_id); } int seafile_mkdir_with_parents (const char *repo_id, const char *parent_dir, const char *new_dir_path, const char *user, GError **error) { if (!repo_id || !parent_dir || !new_dir_path || !user) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } if (seaf_repo_manager_mkdir_with_parents (seaf->repo_mgr, repo_id, parent_dir, new_dir_path, user, 
error) < 0) { return -1; } return 0; } int seafile_set_server_config_int (const char *group, const char *key, int value, GError **error) { if (!group || !key) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } return seaf_cfg_manager_set_config_int (seaf->cfg_mgr, group, key, value); } int seafile_get_server_config_int (const char *group, const char *key, GError **error) { if (!group || !key ) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } return seaf_cfg_manager_get_config_int (seaf->cfg_mgr, group, key); } int seafile_set_server_config_int64 (const char *group, const char *key, gint64 value, GError **error) { if (!group || !key) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } return seaf_cfg_manager_set_config_int64 (seaf->cfg_mgr, group, key, value); } gint64 seafile_get_server_config_int64 (const char *group, const char *key, GError **error) { if (!group || !key ) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } return seaf_cfg_manager_get_config_int64 (seaf->cfg_mgr, group, key); } int seafile_set_server_config_string (const char *group, const char *key, const char *value, GError **error) { if (!group || !key || !value) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } return seaf_cfg_manager_set_config_string (seaf->cfg_mgr, group, key, value); } char * seafile_get_server_config_string (const char *group, const char *key, GError **error) { if (!group || !key ) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return NULL; } return seaf_cfg_manager_get_config_string (seaf->cfg_mgr, group, key); } int seafile_set_server_config_boolean (const char *group, const char *key, int value, GError **error) { if (!group || !key) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } return 
seaf_cfg_manager_set_config_boolean (seaf->cfg_mgr, group, key, value); } int seafile_get_server_config_boolean (const char *group, const char *key, GError **error) { if (!group || !key ) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return -1; } return seaf_cfg_manager_get_config_boolean (seaf->cfg_mgr, group, key); } GObject * seafile_get_group_shared_repo_by_path (const char *repo_id, const char *path, int group_id, int is_org, GError **error) { if (!repo_id || group_id < 0) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Arguments error"); return NULL; } SeafRepoManager *mgr = seaf->repo_mgr; return seaf_get_group_shared_repo_by_path (mgr, repo_id, path, group_id, is_org ? TRUE:FALSE, error); } GObject * seafile_get_shared_repo_by_path (const char *repo_id, const char *path, const char *shared_to, int is_org, GError **error) { if (!repo_id || !shared_to) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Arguments error"); return NULL; } SeafRepoManager *mgr = seaf->repo_mgr; return seaf_get_shared_repo_by_path (mgr, repo_id, path, shared_to, is_org ? TRUE:FALSE, error); } GList * seafile_get_group_repos_by_user (const char *user, GError **error) { if (!user) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Arguments error"); return NULL; } SeafRepoManager *mgr = seaf->repo_mgr; return seaf_get_group_repos_by_user (mgr, user, -1, error); } GList * seafile_get_org_group_repos_by_user (const char *user, int org_id, GError **error) { if (!user) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Arguments error"); return NULL; } SeafRepoManager *mgr = seaf->repo_mgr; return seaf_get_group_repos_by_user (mgr, user, org_id, error); } int seafile_repo_has_been_shared (const char *repo_id, int including_groups, GError **error) { if (!repo_id) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Arguments error"); return FALSE; } gboolean exists = seaf_share_manager_repo_has_been_shared (seaf->share_mgr, repo_id, including_groups ? TRUE : FALSE); return exists ? 
1 : 0; } GList * seafile_get_shared_users_by_repo (const char *repo_id, GError **error) { if (!repo_id) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Arguments error"); return NULL; } return seaf_share_manager_get_shared_users_by_repo (seaf->share_mgr, repo_id); } GList * seafile_org_get_shared_users_by_repo (int org_id, const char *repo_id, GError **error) { if (!repo_id || org_id < 0) { g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Arguments error"); return NULL; } return seaf_share_manager_org_get_shared_users_by_repo (seaf->share_mgr, org_id, repo_id); } /* Resumable file upload. */ gint64 seafile_get_upload_tmp_file_offset (const char *repo_id, const char *file_path, GError **error) { if (!repo_id || !is_uuid_valid(repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } int path_len; if (!file_path || (path_len = strlen(file_path)) == 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid file path"); return -1; } char *rfile_path = format_dir_path (file_path); gint64 ret = seaf_repo_manager_get_upload_tmp_file_offset (seaf->repo_mgr, repo_id, rfile_path, error); g_free (rfile_path); return ret; } char * seafile_convert_repo_path (const char *repo_id, const char *path, const char *user, int is_org, GError **error) { if (!is_uuid_valid(repo_id) || !path || !user) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments error"); return NULL; } char *rpath = format_dir_path (path); char *ret = seaf_repo_manager_convert_repo_path(seaf->repo_mgr, repo_id, rpath, user, is_org ? 
TRUE : FALSE, error); g_free(rpath); return ret; } int seafile_set_repo_status(const char *repo_id, int status, GError **error) { if (!is_uuid_valid(repo_id) || status < 0 || status >= N_REPO_STATUS) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments error"); return -1; } return seaf_repo_manager_set_repo_status(seaf->repo_mgr, repo_id, status); } int seafile_get_repo_status(const char *repo_id, GError **error) { int status; if (!is_uuid_valid(repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments error"); return -1; } status = seaf_repo_manager_get_repo_status(seaf->repo_mgr, repo_id); return (status == -1) ? 0 : status; } GList * seafile_search_files (const char *repo_id, const char *str, GError **error) { return seafile_search_files_by_path (repo_id, NULL, str, error); } GList * seafile_search_files_by_path (const char *repo_id, const char *path, const char *str, GError **error) { if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return NULL; } GList *file_list = seaf_fs_manager_search_files_by_path (seaf->fs_mgr, repo_id, path, str); GList *ret = NULL, *ptr; for (ptr = file_list; ptr; ptr=ptr->next) { SearchResult *sr = ptr->data; SeafileSearchResult *search_result = seafile_search_result_new (); g_object_set (search_result, "path", sr->path, "size", sr->size, "mtime", sr->mtime, "is_dir", sr->is_dir, NULL); ret = g_list_prepend (ret, search_result); g_free (sr->path); g_free (sr); } return g_list_reverse (ret); } /*RPC functions merged from ccnet-server*/ int ccnet_rpc_add_emailuser (const char *email, const char *passwd, int is_staff, int is_active, GError **error) { CcnetUserManager *user_mgr = seaf->user_mgr; int ret; if (!email || !passwd) { g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Email and passwd can not be NULL"); return -1; } ret = ccnet_user_manager_add_emailuser (user_mgr, email, passwd, is_staff, is_active); return ret; } int 
ccnet_rpc_remove_emailuser (const char *source, const char *email, GError **error) { CcnetUserManager *user_mgr = seaf->user_mgr; int ret; if (!email) { g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Email can not be NULL"); return -1; } ret = ccnet_user_manager_remove_emailuser (user_mgr, source, email); return ret; } int ccnet_rpc_validate_emailuser (const char *email, const char *passwd, GError **error) { CcnetUserManager *user_mgr = seaf->user_mgr; int ret; if (!email || !passwd) { g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Email and passwd can not be NULL"); return -1; } if (passwd[0] == 0) return -1; ret = ccnet_user_manager_validate_emailuser (user_mgr, email, passwd); return ret; } GObject* ccnet_rpc_get_emailuser (const char *email, GError **error) { if (!email) { g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Email can not be NULL"); return NULL; } CcnetUserManager *user_mgr = seaf->user_mgr; CcnetEmailUser *emailuser = NULL; emailuser = ccnet_user_manager_get_emailuser (user_mgr, email, error); return (GObject *)emailuser; } GObject* ccnet_rpc_get_emailuser_with_import (const char *email, GError **error) { if (!email) { g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Email can not be NULL"); return NULL; } CcnetUserManager *user_mgr = seaf->user_mgr; CcnetEmailUser *emailuser = NULL; emailuser = ccnet_user_manager_get_emailuser_with_import (user_mgr, email, error); return (GObject *)emailuser; } GObject* ccnet_rpc_get_emailuser_by_id (int id, GError **error) { CcnetUserManager *user_mgr = seaf->user_mgr; CcnetEmailUser *emailuser = NULL; emailuser = ccnet_user_manager_get_emailuser_by_id (user_mgr, id); return (GObject *)emailuser; } GList* ccnet_rpc_get_emailusers (const char *source, int start, int limit, const char *status, GError **error) { CcnetUserManager *user_mgr = seaf->user_mgr; GList *emailusers = NULL; emailusers = ccnet_user_manager_get_emailusers (user_mgr, source, start, limit, status); return emailusers; 
} GList* ccnet_rpc_search_emailusers (const char *source, const char *email_patt, int start, int limit, GError **error) { CcnetUserManager *user_mgr = seaf->user_mgr; GList *emailusers = NULL; emailusers = ccnet_user_manager_search_emailusers (user_mgr, source, email_patt, start, limit); return emailusers; } GList* ccnet_rpc_search_groups (const char *group_patt, int start, int limit, GError **error) { CcnetGroupManager *group_mgr = seaf->group_mgr; GList *groups = NULL; groups = ccnet_group_manager_search_groups (group_mgr, group_patt, start, limit); return groups; } GList * ccnet_rpc_search_group_members (int group_id, const char *pattern, GError **error) { CcnetGroupManager *group_mgr = seaf->group_mgr; GList *ret = NULL; ret = ccnet_group_manager_search_group_members (group_mgr, group_id, pattern); return ret; } GList* ccnet_rpc_get_top_groups (int including_org, GError **error) { CcnetGroupManager *group_mgr = seaf->group_mgr; GList *groups = NULL; groups = ccnet_group_manager_get_top_groups (group_mgr, including_org ? 
TRUE : FALSE, error); return groups; } GList* ccnet_rpc_get_child_groups (int group_id, GError **error) { CcnetGroupManager *group_mgr = seaf->group_mgr; GList *groups = NULL; groups = ccnet_group_manager_get_child_groups (group_mgr, group_id, error); return groups; } GList* ccnet_rpc_get_descendants_groups(int group_id, GError **error) { CcnetGroupManager *group_mgr = seaf->group_mgr; GList *groups = NULL; groups = ccnet_group_manager_get_descendants_groups (group_mgr, group_id, error); return groups; } gint64 ccnet_rpc_count_emailusers (const char *source, GError **error) { CcnetUserManager *user_mgr = seaf->user_mgr; return ccnet_user_manager_count_emailusers (user_mgr, source); } gint64 ccnet_rpc_count_inactive_emailusers (const char *source, GError **error) { CcnetUserManager *user_mgr = seaf->user_mgr; return ccnet_user_manager_count_inactive_emailusers (user_mgr, source); } int ccnet_rpc_update_emailuser (const char *source, int id, const char* passwd, int is_staff, int is_active, GError **error) { CcnetUserManager *user_mgr = seaf->user_mgr; return ccnet_user_manager_update_emailuser(user_mgr, source, id, passwd, is_staff, is_active); } int ccnet_rpc_update_role_emailuser (const char* email, const char* role, GError **error) { CcnetUserManager *user_mgr = seaf->user_mgr; return ccnet_user_manager_update_role_emailuser(user_mgr, email, role); } GList* ccnet_rpc_get_superusers (GError **error) { CcnetUserManager *user_mgr = seaf->user_mgr; return ccnet_user_manager_get_superusers(user_mgr); } GList * ccnet_rpc_get_emailusers_in_list(const char *source, const char *user_list, GError **error) { if (!user_list || !source) { g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments"); return NULL; } CcnetUserManager *user_mgr = seaf->user_mgr; return ccnet_user_manager_get_emailusers_in_list (user_mgr, source, user_list, error); } int ccnet_rpc_update_emailuser_id (const char *old_email, const char *new_email, GError **error) { if (!old_email || 
!new_email) { g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments"); return -1; } CcnetUserManager *user_mgr = seaf->user_mgr; return ccnet_user_manager_update_emailuser_id (user_mgr, old_email, new_email, error); } int ccnet_rpc_create_group (const char *group_name, const char *user_name, const char *type, int parent_group_id, GError **error) { CcnetGroupManager *group_mgr = seaf->group_mgr; int ret; if (!group_name || !user_name) { g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Group name and user name can not be NULL"); return -1; } ret = ccnet_group_manager_create_group (group_mgr, group_name, user_name, parent_group_id, error); return ret; } int ccnet_rpc_create_org_group (int org_id, const char *group_name, const char *user_name, int parent_group_id, GError **error) { CcnetGroupManager *group_mgr = seaf->group_mgr; int ret; if (org_id < 0 || !group_name || !user_name) { g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad args"); return -1; } ret = ccnet_group_manager_create_org_group (group_mgr, org_id, group_name, user_name, parent_group_id, error); return ret; } int ccnet_rpc_remove_group (int group_id, GError **error) { CcnetGroupManager *group_mgr = seaf->group_mgr; int ret; if (group_id <= 0) { g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Invalid group_id parameter"); return -1; } ret = ccnet_group_manager_remove_group (group_mgr, group_id, FALSE, error); return ret; } int ccnet_rpc_group_add_member (int group_id, const char *user_name, const char *member_name, GError **error) { CcnetGroupManager *group_mgr = seaf->group_mgr; int ret; if (group_id <= 0 || !user_name || !member_name) { g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Group id and user name and member name can not be NULL"); return -1; } ret = ccnet_group_manager_add_member (group_mgr, group_id, user_name, member_name, error); return ret; } int ccnet_rpc_group_remove_member (int group_id, const char *user_name, const char *member_name, GError 
**error) { CcnetGroupManager *group_mgr = seaf->group_mgr; int ret; if (!user_name || !member_name) { g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "User name and member name can not be NULL"); return -1; } ret = ccnet_group_manager_remove_member (group_mgr, group_id, user_name, member_name, error); return ret; } int ccnet_rpc_group_set_admin (int group_id, const char *member_name, GError **error) { CcnetGroupManager *group_mgr = seaf->group_mgr; int ret; if (group_id <= 0 || !member_name) { g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments"); return -1; } ret = ccnet_group_manager_set_admin (group_mgr, group_id, member_name, error); return ret; } int ccnet_rpc_group_unset_admin (int group_id, const char *member_name, GError **error) { CcnetGroupManager *group_mgr = seaf->group_mgr; int ret; if (group_id <= 0 || !member_name) { g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments"); return -1; } ret = ccnet_group_manager_unset_admin (group_mgr, group_id, member_name, error); return ret; } int ccnet_rpc_set_group_name (int group_id, const char *group_name, GError **error) { CcnetGroupManager *group_mgr = seaf->group_mgr; int ret; if (group_id <= 0 || !group_name) { g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments"); return -1; } ret = ccnet_group_manager_set_group_name (group_mgr, group_id, group_name, error); return ret; } int ccnet_rpc_quit_group (int group_id, const char *user_name, GError **error) { CcnetGroupManager *group_mgr = seaf->group_mgr; int ret; if (group_id <= 0 || !user_name) { g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Group id and user name can not be NULL"); return -1; } ret = ccnet_group_manager_quit_group (group_mgr, group_id, user_name, error); return ret; } GList * ccnet_rpc_get_groups (const char *username, int return_ancestors, GError **error) { CcnetGroupManager *group_mgr = seaf->group_mgr; GList *ret = NULL; if (!username) { g_set_error (error, CCNET_DOMAIN, 
CCNET_ERR_INTERNAL, "User name can not be NULL"); return NULL; } ret = ccnet_group_manager_get_groups_by_user (group_mgr, username, return_ancestors ? TRUE : FALSE, error); return ret; } GList * ccnet_rpc_list_all_departments (GError **error) { CcnetGroupManager *group_mgr = seaf->group_mgr; GList *ret = NULL; ret = ccnet_group_manager_list_all_departments (group_mgr, error); return ret; } GList* seafile_get_repos_by_id_prefix (const char *id_prefix, int start, int limit, GError **error) { GList *ret = NULL; GList *repos, *ptr; if (!id_prefix) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null"); return NULL; } repos = seaf_repo_manager_get_repos_by_id_prefix (seaf->repo_mgr, id_prefix, start, limit); ret = convert_repo_list (repos); for(ptr = repos; ptr; ptr = ptr->next) { seaf_repo_unref ((SeafRepo *)ptr->data); } g_list_free (repos); return ret; } GList * ccnet_rpc_get_all_groups (int start, int limit, const char *source, GError **error) { CcnetGroupManager *group_mgr = seaf->group_mgr; GList *ret = NULL; ret = ccnet_group_manager_get_all_groups (group_mgr, start, limit, error); return ret; } GList * ccnet_rpc_get_ancestor_groups (int group_id, GError ** error) { CcnetGroupManager *group_mgr = seaf->group_mgr; GList *ret = NULL; ret = ccnet_group_manager_get_ancestor_groups (group_mgr, group_id); return ret; } GObject * ccnet_rpc_get_group (int group_id, GError **error) { CcnetGroupManager *group_mgr = seaf->group_mgr; CcnetGroup *group = NULL; group = ccnet_group_manager_get_group (group_mgr, group_id, error); if (!group) { return NULL; } /* g_object_ref (group); */ return (GObject *)group; } GList * ccnet_rpc_get_group_members (int group_id, int start, int limit, GError **error) { CcnetGroupManager *group_mgr = seaf->group_mgr; GList *ret = NULL; if (start < 0 ) { start = 0; } ret = ccnet_group_manager_get_group_members (group_mgr, group_id, start, limit, error); if (ret == NULL) return NULL; return g_list_reverse (ret); } 
/* RPC wrappers for group membership queries.  Each wrapper validates its
 * arguments, then delegates to the corresponding CcnetGroupManager /
 * CcnetOrgManager routine.  On bad arguments a CCNET_ERR_INTERNAL GError
 * is set and an error value (-1, 0 or NULL, depending on return type)
 * is returned. */
GList *
ccnet_rpc_get_members_with_prefix(int group_id, const char *prefix,
                                  GError **error)
{
    CcnetGroupManager *group_mgr = seaf->group_mgr;
    GList *ret = NULL;

    ret = ccnet_group_manager_get_members_with_prefix (group_mgr, group_id,
                                                       prefix, error);
    return ret;
}

/* Returns the manager's verdict on whether user_name is (directly, or when
 * in_structure is non-zero, transitively) a staff member of group_id. */
int
ccnet_rpc_check_group_staff (int group_id, const char *user_name,
                             int in_structure, GError **error)
{
    CcnetGroupManager *group_mgr = seaf->group_mgr;

    if (group_id <= 0 || !user_name) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return -1;
    }

    return ccnet_group_manager_check_group_staff (group_mgr,
                                                  group_id, user_name,
                                                  in_structure ? TRUE : FALSE);
}

/* Remove `user` from all groups. */
int
ccnet_rpc_remove_group_user (const char *user, GError **error)
{
    CcnetGroupManager *group_mgr = seaf->group_mgr;

    if (!user) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return -1;
    }

    return ccnet_group_manager_remove_group_user (group_mgr, user);
}

/* NOTE: unlike most wrappers here, this one returns 0 (not -1) on bad
 * arguments, i.e. "not a member". */
int
ccnet_rpc_is_group_user (int group_id, const char *user,
                         int in_structure, GError **error)
{
    CcnetGroupManager *group_mgr = seaf->group_mgr;

    if (!user || group_id < 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return 0;
    }

    return ccnet_group_manager_is_group_user (group_mgr, group_id, user,
                                              in_structure ? TRUE : FALSE);
}

int
ccnet_rpc_set_group_creator (int group_id, const char *user_name,
                             GError **error)
{
    CcnetGroupManager *group_mgr = seaf->group_mgr;

    if (!user_name || group_id < 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return -1;
    }

    return ccnet_group_manager_set_group_creator (group_mgr, group_id,
                                                  user_name);
}

/* group_ids is a non-empty string identifying several groups; the parsing
 * is done inside the group manager. */
GList *
ccnet_rpc_get_groups_members (const char *group_ids, GError **error)
{
    if (!group_ids || g_strcmp0(group_ids, "") == 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return NULL;
    }

    CcnetGroupManager *group_mgr = seaf->group_mgr;

    return ccnet_group_manager_get_groups_members (group_mgr, group_ids,
                                                   error);
}

/* RPC wrappers for organization management. */
int
ccnet_rpc_create_org (const char *org_name, const char *url_prefix,
                      const char *creator, GError **error)
{
    CcnetOrgManager *org_mgr = seaf->org_mgr;

    if (!org_name || !url_prefix || !creator) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_create_org (org_mgr, org_name, url_prefix,
                                         creator, error);
}

/* Delete an organization: first remove all of its email users, then all of
 * its groups, and finally the org record itself. */
int
ccnet_rpc_remove_org (int org_id, GError **error)
{
    GList *group_ids = NULL, *email_list=NULL, *ptr;
    const char *url_prefix = NULL;
    CcnetOrgManager *org_mgr = seaf->org_mgr;
    CcnetUserManager *user_mgr = seaf->user_mgr;
    CcnetGroupManager *group_mgr = seaf->group_mgr;

    if (org_id < 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return -1;
    }

    /* NOTE(review): url_prefix may be NULL on lookup failure and does not
     * appear to be freed here; also `error` is reused across several calls
     * below, which can trigger GLib "GError set over the top" warnings if
     * more than one call fails — confirm against the manager APIs. */
    url_prefix = ccnet_org_manager_get_url_prefix_by_org_id (org_mgr, org_id,
                                                             error);
    email_list = ccnet_org_manager_get_org_emailusers (org_mgr, url_prefix,
                                                       0, INT_MAX);
    ptr = email_list;
    while (ptr) {
        ccnet_user_manager_remove_emailuser (user_mgr, "DB",
                                             (gchar *)ptr->data);
        ptr = ptr->next;
    }
    string_list_free (email_list);

    group_ids = ccnet_org_manager_get_org_group_ids (org_mgr, org_id,
                                                     0, INT_MAX);
    ptr = group_ids;
    while (ptr) {
        /* Group ids are stored directly in the list pointers. */
        ccnet_group_manager_remove_group (group_mgr, (int)(long)ptr->data,
                                          TRUE, error);
        ptr = ptr->next;
    }
    g_list_free (group_ids);

    return ccnet_org_manager_remove_org (org_mgr, org_id, error);
}

GList *
ccnet_rpc_get_all_orgs (int start, int limit, GError **error)
{
    CcnetOrgManager *org_mgr = seaf->org_mgr;
    GList *ret = NULL;

    ret = ccnet_org_manager_get_all_orgs (org_mgr, start, limit);

    return ret;
}

gint64
ccnet_rpc_count_orgs (GError **error)
{
    CcnetOrgManager *org_mgr = seaf->org_mgr;

    return ccnet_org_manager_count_orgs(org_mgr);
}

GObject *
ccnet_rpc_get_org_by_url_prefix (const char *url_prefix, GError **error)
{
    CcnetOrganization *org = NULL;
    CcnetOrgManager *org_mgr = seaf->org_mgr;

    if (!url_prefix) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return NULL;
    }

    org = ccnet_org_manager_get_org_by_url_prefix (org_mgr, url_prefix,
                                                   error);
    if (!org)
        return NULL;

    return (GObject *)org;
}

GObject *
ccnet_rpc_get_org_by_id (int org_id, GError **error)
{
    CcnetOrganization *org = NULL;
    CcnetOrgManager *org_mgr = seaf->org_mgr;

    if (org_id <= 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return NULL;
    }

    org = ccnet_org_manager_get_org_by_id (org_mgr, org_id, error);
    if (!org)
        return NULL;

    return (GObject *)org;
}

int
ccnet_rpc_add_org_user (int org_id, const char *email, int is_staff,
                        GError **error)
{
    CcnetOrgManager *org_mgr = seaf->org_mgr;

    if (org_id < 0 || !email) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_add_org_user (org_mgr, org_id, email, is_staff,
                                           error);
}

int
ccnet_rpc_remove_org_user (int org_id, const char *email, GError **error)
{
    CcnetOrgManager *org_mgr = seaf->org_mgr;

    if (org_id < 0 || !email) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_remove_org_user (org_mgr, org_id, email, error);
}

GList *
ccnet_rpc_get_orgs_by_user (const char *email, GError **error)
{
    CcnetOrgManager *org_mgr = seaf->org_mgr;
    GList *org_list = NULL;

    org_list = ccnet_org_manager_get_orgs_by_user (org_mgr, email,
                                                   error);

    return org_list;
}

/* Resolve the org's member email list into CcnetEmailUser objects,
 * preserving the order returned by the org manager.  Users whose lookup
 * fails are silently skipped. */
GList *
ccnet_rpc_get_org_emailusers (const char *url_prefix, int start, int limit,
                              GError **error)
{
    CcnetUserManager *user_mgr = seaf->user_mgr;
    CcnetOrgManager *org_mgr = seaf->org_mgr;
    GList *email_list = NULL, *ptr;
    GList *ret = NULL;

    if (!url_prefix) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return NULL;
    }

    email_list = ccnet_org_manager_get_org_emailusers (org_mgr, url_prefix,
                                                       start, limit);
    if (email_list == NULL) {
        return NULL;
    }

    ptr = email_list;
    while (ptr) {
        char *email = ptr->data;
        CcnetEmailUser *emailuser = ccnet_user_manager_get_emailuser (user_mgr,
                                                                      email,
                                                                      NULL);
        if (emailuser != NULL) {
            ret = g_list_prepend (ret, emailuser);
        }
        ptr = ptr->next;
    }
    string_list_free (email_list);

    /* Prepending reversed the order; restore it. */
    return g_list_reverse (ret);
}

int
ccnet_rpc_add_org_group (int org_id, int group_id, GError **error)
{
    CcnetOrgManager *org_mgr = seaf->org_mgr;

    if (org_id < 0 || group_id < 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_add_org_group (org_mgr, org_id, group_id,
                                            error);
}

int
ccnet_rpc_remove_org_group (int org_id, int group_id, GError **error)
{
    CcnetOrgManager *org_mgr = seaf->org_mgr;

    if (org_id < 0 || group_id < 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_remove_org_group (org_mgr, org_id, group_id,
                                               error);
}

int
ccnet_rpc_is_org_group (int group_id, GError **error)
{
    CcnetOrgManager *org_mgr = seaf->org_mgr;

    if (group_id <= 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_is_org_group (org_mgr, group_id, error);
}

int
ccnet_rpc_get_org_id_by_group (int group_id, GError **error)
{
    CcnetOrgManager *org_mgr = seaf->org_mgr;

    if (group_id <= 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_get_org_id_by_group (org_mgr, group_id, error);
}

GList *
ccnet_rpc_get_org_groups (int org_id, int start, int limit, GError **error)
{
    CcnetOrgManager *org_mgr = seaf->org_mgr;
    GList *ret = NULL;

    if (org_id < 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return NULL;
    }

    /* correct parameter */
    if (start < 0 ) {
        start = 0;
    }

    ret = ccnet_org_manager_get_org_groups (org_mgr, org_id, start, limit);

    return ret;
}

GList *
ccnet_rpc_get_org_groups_by_user (const char *user, int org_id,
                                  GError **error)
{
    CcnetOrgManager *org_mgr = seaf->org_mgr;
    GList *ret = NULL;

    if (org_id < 0 || !user) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return NULL;
    }

    ret = ccnet_org_manager_get_org_groups_by_user (org_mgr, user, org_id);

    return ret;
}

GList *
ccnet_rpc_get_org_top_groups (int org_id, GError **error)
{
    CcnetOrgManager *org_mgr = seaf->org_mgr;
    GList *ret = NULL;

    if (org_id < 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return NULL;
    }

    ret = ccnet_org_manager_get_org_top_groups (org_mgr, org_id, error);

    return ret;
}

int
ccnet_rpc_org_user_exists (int org_id, const char *email, GError **error)
{
    CcnetOrgManager *org_mgr = seaf->org_mgr;

    if (org_id < 0 || !email) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_org_user_exists (org_mgr, org_id, email, error);
}

int
ccnet_rpc_is_org_staff (int org_id, const char *email, GError **error)
{
    CcnetOrgManager *org_mgr = seaf->org_mgr;

    if (org_id < 0 || !email) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_is_org_staff (org_mgr, org_id, email, error);
}

int
ccnet_rpc_set_org_staff (int org_id, const char *email, GError **error)
{
    CcnetOrgManager *org_mgr = seaf->org_mgr;

    if (org_id < 0 || !email) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_set_org_staff (org_mgr, org_id, email, error);
}

int
ccnet_rpc_unset_org_staff (int org_id, const char *email, GError **error)
{
    CcnetOrgManager *org_mgr = seaf->org_mgr;

    if (org_id < 0 || !email) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_unset_org_staff (org_mgr, org_id, email, error);
}

int
ccnet_rpc_set_org_name (int org_id, const char *org_name, GError **error)
{
    CcnetOrgManager *org_mgr = seaf->org_mgr;

    if (org_id < 0 || !org_name) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_set_org_name (org_mgr, org_id, org_name, error);
}

#endif /* SEAFILE_SERVER */


================================================
FILE: common/seaf-db.c
================================================

#include "common.h"

#include "log.h"

#include "seaf-db.h"

/* NOTE(review): the angle-bracket header names of the following system
 * includes appear to have been stripped by the text extraction; presumably
 * <stdarg.h>, <pthread.h>, the MySQL client headers inside HAVE_MYSQL,
 * and the SQLite header — confirm against the original file. */
#include

#ifdef HAVE_MYSQL
#include
#include
#endif

#include
#include

/* A simple connection pool shared by all users of one SeafDB. */
struct DBConnPool {
    GPtrArray *connections;     /* array of DBConnection* */
    pthread_mutex_t lock;       /* protects `connections` */
    int max_connections;        /* 0 means pooling disabled */
};
typedef struct DBConnPool DBConnPool;

struct SeafDB {
    int type;                   /* SEAF_DB_TYPE_* */
    DBConnPool *pool;
};

typedef struct DBConnection {
    gboolean is_available;      /* TRUE while idle in the pool */
    gboolean delete_pending;    /* marked dead, to be closed and dropped */
    DBConnPool *pool;
} DBConnection;

struct SeafDBRow {
    /* Empty */
};

struct SeafDBTrans {
    DBConnection *conn;
    gboolean need_close;        /* close the connection instead of pooling */
};

/* Virtual table implemented by each backend (MySQL / SQLite). */
typedef struct DBOperations {
    DBConnection* (*get_connection)(SeafDB *db);
    void (*release_connection)(DBConnection *conn, gboolean need_close);
    int (*execute_sql_no_stmt)(DBConnection *conn, const char *sql,
                               gboolean *retry);
    int (*execute_sql)(DBConnection *conn, const char *sql,
                       int n, va_list args, gboolean *retry);
    int (*query_foreach_row)(DBConnection *conn,
                             const char *sql, SeafDBRowFunc callback,
                             void *data, int n, va_list args,
                             gboolean *retry);
    int (*row_get_column_count)(SeafDBRow *row);
    const char* (*row_get_column_string)(SeafDBRow *row, int idx);
    int (*row_get_column_int)(SeafDBRow *row, int idx);
    gint64 (*row_get_column_int64)(SeafDBRow *row, int idx);
} DBOperations;

static DBOperations
db_ops;

#ifdef HAVE_MYSQL

/* MySQL Ops */
static SeafDB *
mysql_db_new (const char *host,
              int port,
              const char *user,
              const char *password,
              const char *db_name,
              const char *unix_socket,
              gboolean use_ssl,
              gboolean skip_verify,
              const char *ca_path,
              const char *charset);
static DBConnection *
mysql_db_get_connection (SeafDB *db);
static void
mysql_db_release_connection (DBConnection *vconn);
static int
mysql_db_execute_sql_no_stmt (DBConnection *vconn, const char *sql,
                              gboolean *retry);
static int
mysql_db_execute_sql (DBConnection *vconn, const char *sql,
                      int n, va_list args, gboolean *retry);
static int
mysql_db_query_foreach_row (DBConnection *vconn, const char *sql,
                            SeafDBRowFunc callback, void *data,
                            int n, va_list args, gboolean *retry);
static int
mysql_db_row_get_column_count (SeafDBRow *row);
static const char *
mysql_db_row_get_column_string (SeafDBRow *row, int idx);
static int
mysql_db_row_get_column_int (SeafDBRow *row, int idx);
static gint64
mysql_db_row_get_column_int64 (SeafDBRow *row, int idx);
static gboolean
mysql_db_connection_ping (DBConnection *vconn);

static DBConnPool *
init_conn_pool_common (int max_connections)
{
    DBConnPool *pool = g_new0(DBConnPool, 1);
    pool->connections = g_ptr_array_sized_new (max_connections);
    pthread_mutex_init (&pool->lock, NULL);
    pool->max_connections = max_connections;

    return pool;
}

/* Hand out an idle pooled connection (after pinging it), or create a new
 * one when the pool has room.  Dead connections found on the way are marked
 * delete_pending and reaped before returning. */
static DBConnection *
mysql_conn_pool_get_connection (SeafDB *db)
{
    DBConnPool *pool = db->pool;
    DBConnection *conn = NULL;
    DBConnection *d_conn = NULL;

    /* Pooling disabled: always open a fresh connection. */
    if (pool->max_connections == 0) {
        conn = mysql_db_get_connection (db);
        conn->pool = pool;
        return conn;
    }

    pthread_mutex_lock (&pool->lock);
    guint i, size = pool->connections->len;
    for (i = 0; i < size; ++i) {
        conn = g_ptr_array_index (pool->connections, i);
        if (!conn->is_available) {
            continue;
        }
        if (mysql_db_connection_ping (conn)) {
            conn->is_available = FALSE;
            goto out;
        }
        /* Ping failed: take the dead connection out of circulation. */
        conn->is_available = FALSE;
        conn->delete_pending = TRUE;
    }
    conn = NULL;
    if (size < pool->max_connections) {
        conn =
mysql_db_get_connection (db); if (conn) { conn->pool = pool; conn->is_available = FALSE; g_ptr_array_add (pool->connections, conn); } } out: size = pool->connections->len; if (size > 0) { int index; for (index = size - 1; index >= 0; index--) { d_conn = g_ptr_array_index (pool->connections, index); if (d_conn->delete_pending) { g_ptr_array_remove (pool->connections, d_conn); mysql_db_release_connection (d_conn); } } } pthread_mutex_unlock (&pool->lock); return conn; } static void mysql_conn_pool_release_connection (DBConnection *conn, gboolean need_close) { if (!conn) return; if (conn->pool->max_connections == 0) { mysql_db_release_connection (conn); return; } if (need_close) { pthread_mutex_lock (&conn->pool->lock); g_ptr_array_remove (conn->pool->connections, conn); pthread_mutex_unlock (&conn->pool->lock); mysql_db_release_connection (conn); return; } pthread_mutex_lock (&conn->pool->lock); conn->is_available = TRUE; pthread_mutex_unlock (&conn->pool->lock); } #define KEEPALIVE_INTERVAL 30 static void * mysql_conn_keepalive (void *arg) { DBConnPool *pool = arg; DBConnection *conn = NULL; DBConnection *d_conn = NULL; char *sql = "SELECT 1;"; int rc = 0; va_list args; while (1) { pthread_mutex_lock (&pool->lock); guint i, size = pool->connections->len; for (i = 0; i < size; ++i) { conn = g_ptr_array_index (pool->connections, i); if (conn->is_available) { rc = db_ops.execute_sql (conn, sql, 0, args, NULL); if (rc < 0) { conn->is_available = FALSE; conn->delete_pending = TRUE; } } } if (size > 0) { int index; for (index = size - 1; index >= 0; index--) { d_conn = g_ptr_array_index (pool->connections, index); if (d_conn->delete_pending) { g_ptr_array_remove (pool->connections, d_conn); mysql_db_release_connection (d_conn); } } } pthread_mutex_unlock (&pool->lock); sleep (KEEPALIVE_INTERVAL); } return NULL; } SeafDB * seaf_db_new_mysql (const char *host, int port, const char *user, const char *passwd, const char *db_name, const char *unix_socket, gboolean use_ssl, 
gboolean skip_verify,
                   const char *ca_path,
                   const char *charset,
                   int max_connections)
{
    SeafDB *db;

    db = mysql_db_new (host, port, user, passwd, db_name, unix_socket,
                       use_ssl, skip_verify, ca_path, charset);
    if (!db)
        return NULL;
    db->type = SEAF_DB_TYPE_MYSQL;

    /* Route the generic vtable to the MySQL backend. */
    db_ops.get_connection = mysql_conn_pool_get_connection;
    db_ops.release_connection = mysql_conn_pool_release_connection;
    db_ops.execute_sql_no_stmt = mysql_db_execute_sql_no_stmt;
    db_ops.execute_sql = mysql_db_execute_sql;
    db_ops.query_foreach_row = mysql_db_query_foreach_row;
    db_ops.row_get_column_count = mysql_db_row_get_column_count;
    db_ops.row_get_column_string = mysql_db_row_get_column_string;
    db_ops.row_get_column_int = mysql_db_row_get_column_int;
    db_ops.row_get_column_int64 = mysql_db_row_get_column_int64;

    db->pool = init_conn_pool_common (max_connections);

    /* Detached background thread keeps pooled connections alive. */
    pthread_t tid;
    int ret = pthread_create (&tid, NULL, mysql_conn_keepalive, db->pool);
    if (ret != 0) {
        seaf_warning ("Failed to create mysql connection keepalive thread.\n");
        return NULL;
    }
    pthread_detach (tid);

    return db;
}

#endif

/* SQLite Ops */
static SeafDB *
sqlite_db_new (const char *db_path);
static DBConnection *
sqlite_db_get_connection (SeafDB *db);
static void
sqlite_db_release_connection (DBConnection *vconn, gboolean need_close);
static int
sqlite_db_execute_sql_no_stmt (DBConnection *vconn, const char *sql,
                               gboolean *retry);
static int
sqlite_db_execute_sql (DBConnection *vconn, const char *sql,
                       int n, va_list args, gboolean *retry);
static int
sqlite_db_query_foreach_row (DBConnection *vconn, const char *sql,
                             SeafDBRowFunc callback, void *data,
                             int n, va_list args, gboolean *retry);
static int
sqlite_db_row_get_column_count (SeafDBRow *row);
static const char *
sqlite_db_row_get_column_string (SeafDBRow *row, int idx);
static int
sqlite_db_row_get_column_int (SeafDBRow *row, int idx);
static gint64
sqlite_db_row_get_column_int64 (SeafDBRow *row, int idx);

SeafDB *
seaf_db_new_sqlite (const char *db_path, int max_connections)
{
    SeafDB
    *db;

    db = sqlite_db_new (db_path);
    if (!db)
        return NULL;
    db->type = SEAF_DB_TYPE_SQLITE;

    /* Route the generic vtable to the SQLite backend (no pooling). */
    db_ops.get_connection = sqlite_db_get_connection;
    db_ops.release_connection = sqlite_db_release_connection;
    db_ops.execute_sql_no_stmt = sqlite_db_execute_sql_no_stmt;
    db_ops.execute_sql = sqlite_db_execute_sql;
    db_ops.query_foreach_row = sqlite_db_query_foreach_row;
    db_ops.row_get_column_count = sqlite_db_row_get_column_count;
    db_ops.row_get_column_string = sqlite_db_row_get_column_string;
    db_ops.row_get_column_int = sqlite_db_row_get_column_int;
    db_ops.row_get_column_int64 = sqlite_db_row_get_column_int64;

    return db;
}

int
seaf_db_type (SeafDB *db)
{
    return db->type;
}

/* Execute `sql` without a prepared statement, retrying up to 3 times when
 * the backend reports a lost/expired connection. */
int
seaf_db_query (SeafDB *db, const char *sql)
{
    int ret = -1;
    int retry_count = 0;
    while (ret < 0) {
        gboolean retry = FALSE;
        DBConnection *conn = db_ops.get_connection (db);
        if (!conn)
            return -1;

        ret = db_ops.execute_sql_no_stmt (conn, sql, &retry);

        db_ops.release_connection (conn, ret < 0);
        if (!retry || retry_count >= 3) {
            break;
        }
        retry_count++;
        seaf_warning ("The mysql connection has expired, creating a new connection to re-query.\n");
    }

    return ret;
}

gboolean
seaf_db_check_for_existence (SeafDB *db, const char *sql, gboolean *db_err)
{
    return seaf_db_statement_exists (db, sql, db_err, 0);
}

int
seaf_db_foreach_selected_row (SeafDB *db, const char *sql,
                              SeafDBRowFunc callback, void *data)
{
    return seaf_db_statement_foreach_row (db, sql, callback, data, 0);
}

const char *
seaf_db_row_get_column_text (SeafDBRow *row, guint32 idx)
{
    g_return_val_if_fail (idx < db_ops.row_get_column_count(row), NULL);

    return db_ops.row_get_column_string (row, idx);
}

int
seaf_db_row_get_column_int (SeafDBRow *row, guint32 idx)
{
    g_return_val_if_fail (idx < db_ops.row_get_column_count(row), -1);

    return db_ops.row_get_column_int (row, idx);
}

gint64
seaf_db_row_get_column_int64 (SeafDBRow *row, guint32 idx)
{
    g_return_val_if_fail (idx < db_ops.row_get_column_count(row), -1);

    return db_ops.row_get_column_int64 (row, idx);
}

int
seaf_db_get_int (SeafDB *db, const char *sql)
{
    return seaf_db_statement_get_int (db, sql, 0);
}

gint64
seaf_db_get_int64 (SeafDB *db, const char *sql)
{
    return seaf_db_statement_get_int64 (db, sql, 0);
}

char *
seaf_db_get_string (SeafDB *db, const char *sql)
{
    return seaf_db_statement_get_string (db, sql, 0);
}

/* Execute a parameterized statement.  The varargs are (type-string, value)
 * pairs consumed by the backend's bind routine.  Retries like seaf_db_query. */
int
seaf_db_statement_query (SeafDB *db, const char *sql, int n, ...)
{
    int ret = -1;
    int retry_count = 0;
    while (ret < 0) {
        gboolean retry = FALSE;
        DBConnection *conn = db_ops.get_connection (db);
        if (!conn)
            return -1;

        /* va_start/va_end must run on every retry: a va_list is consumed
         * by the backend call. */
        va_list args;
        va_start (args, n);
        ret = db_ops.execute_sql (conn, sql, n, args, &retry);
        va_end (args);

        db_ops.release_connection (conn, ret < 0);
        if (!retry || retry_count >= 3) {
            break;
        }
        retry_count++;
        seaf_warning ("The mysql connection has expired, creating a new connection to re-query.\n");
    }

    return ret;
}

/* TRUE when the query yields at least one row; *db_err reports failures. */
gboolean
seaf_db_statement_exists (SeafDB *db, const char *sql, gboolean *db_err,
                          int n, ...)
{
    int n_rows = -1;
    int retry_count = 0;
    while (n_rows < 0) {
        gboolean retry = FALSE;
        DBConnection *conn = db_ops.get_connection(db);
        if (!conn) {
            *db_err = TRUE;
            return FALSE;
        }

        va_list args;
        va_start (args, n);
        n_rows = db_ops.query_foreach_row (conn, sql, NULL, NULL, n, args,
                                           &retry);
        va_end (args);

        db_ops.release_connection(conn, n_rows < 0);
        if (!retry || retry_count >= 3) {
            break;
        }
        retry_count++;
        seaf_warning ("The mysql connection has expired, creating a new connection to re-query.\n");
    }

    if (n_rows < 0) {
        *db_err = TRUE;
        return FALSE;
    } else {
        *db_err = FALSE;
        return (n_rows != 0);
    }
}

/* Run `callback` for each result row; returns the row count or -1. */
int
seaf_db_statement_foreach_row (SeafDB *db, const char *sql,
                               SeafDBRowFunc callback, void *data,
                               int n, ...)
{
    int ret = -1;
    int retry_count = 0;
    while (ret < 0) {
        gboolean retry = FALSE;
        DBConnection *conn = db_ops.get_connection (db);
        if (!conn)
            return -1;

        va_list args;
        va_start (args, n);
        ret = db_ops.query_foreach_row (conn, sql, callback, data, n, args,
                                        &retry);
        va_end (args);

        db_ops.release_connection (conn, ret < 0);
        if (!retry || retry_count >= 3) {
            break;
        }
        retry_count++;
        seaf_warning ("The mysql connection has expired, creating a new connection to re-query.\n");
    }

    return ret;
}

/* Row callback: capture the first column of the first row as int. */
static gboolean
get_int_cb (SeafDBRow *row, void *data)
{
    int *pret = (int*)data;
    *pret = seaf_db_row_get_column_int (row, 0);

    return FALSE;
}

int
seaf_db_statement_get_int (SeafDB *db, const char *sql, int n, ...)
{
    int ret = -1;
    int rc = -1;
    int retry_count = 0;
    while (rc < 0) {
        gboolean retry = FALSE;
        DBConnection *conn = db_ops.get_connection (db);
        if (!conn)
            return -1;

        va_list args;
        va_start (args, n);
        rc = db_ops.query_foreach_row (conn, sql, get_int_cb, &ret, n, args,
                                       &retry);
        va_end (args);

        db_ops.release_connection (conn, rc < 0);
        if (!retry || retry_count >= 3) {
            break;
        }
        retry_count++;
        seaf_warning ("The mysql connection has expired, creating a new connection to re-query.\n");
    }

    return ret;
}

/* Row callback: capture the first column of the first row as int64. */
static gboolean
get_int64_cb (SeafDBRow *row, void *data)
{
    gint64 *pret = (gint64*)data;
    *pret = seaf_db_row_get_column_int64 (row, 0);

    return FALSE;
}

gint64
seaf_db_statement_get_int64 (SeafDB *db, const char *sql, int n, ...)
{
    gint64 ret = -1;
    int rc = -1;
    int retry_count = 0;
    while (rc < 0) {
        gboolean retry = FALSE;
        DBConnection *conn = db_ops.get_connection (db);
        if (!conn)
            return -1;

        va_list args;
        va_start (args, n);
        rc = db_ops.query_foreach_row (conn, sql, get_int64_cb, &ret, n, args,
                                       &retry);
        va_end(args);

        db_ops.release_connection (conn, rc < 0);
        if (!retry || retry_count >= 3) {
            break;
        }
        retry_count++;
        seaf_warning ("The mysql connection has expired, creating a new connection to re-query.\n");
    }

    return ret;
}

/* Row callback: duplicate the first column of the first row as a string. */
static gboolean
get_string_cb (SeafDBRow *row, void *data)
{
    char **pret = (char**)data;
    *pret = g_strdup(seaf_db_row_get_column_text (row, 0));

    return FALSE;
}

/* Caller owns (g_free) the returned string; NULL on error or no rows. */
char *
seaf_db_statement_get_string (SeafDB *db, const char *sql, int n, ...)
{
    char *ret = NULL;
    int rc = -1;
    int retry_count = 0;
    while (rc < 0) {
        gboolean retry = FALSE;
        DBConnection *conn = db_ops.get_connection (db);
        if (!conn)
            return NULL;

        va_list args;
        va_start (args, n);
        rc = db_ops.query_foreach_row (conn, sql, get_string_cb, &ret, n, args,
                                       &retry);
        va_end(args);

        db_ops.release_connection (conn, rc < 0);
        if (!retry || retry_count >= 3) {
            break;
        }
        retry_count++;
        seaf_warning ("The mysql connection has expired, creating a new connection to re-query.\n");
    }

    return ret;
}

/* Transaction */

/* Begin a transaction on a dedicated connection.  NULL on failure.  The
 * connection stays bound to the returned handle until seaf_db_trans_close. */
SeafDBTrans *
seaf_db_begin_transaction (SeafDB *db)
{
    SeafDBTrans *trans = NULL;

    DBConnection *conn = db_ops.get_connection(db);
    if (!conn) {
        return trans;
    }

    if (db_ops.execute_sql_no_stmt (conn, "BEGIN", NULL) < 0) {
        db_ops.release_connection (conn, TRUE);
        return trans;
    }

    trans = g_new0 (SeafDBTrans, 1);
    trans->conn = conn;

    return trans;
}

void
seaf_db_trans_close (SeafDBTrans *trans)
{
    db_ops.release_connection (trans->conn, trans->need_close);
    g_free (trans);
}

int
seaf_db_commit (SeafDBTrans *trans)
{
    DBConnection *conn = trans->conn;

    if (db_ops.execute_sql_no_stmt (conn, "COMMIT", NULL) < 0) {
        /* A failed COMMIT leaves the connection in an unknown state;
         * force it closed when the transaction handle is released. */
        trans->need_close = TRUE;
        return -1;
    }

    return 0;
}

int
seaf_db_rollback (SeafDBTrans *trans)
{
    DBConnection *conn =
    trans->conn;

    if (db_ops.execute_sql_no_stmt (conn, "ROLLBACK", NULL) < 0) {
        trans->need_close = TRUE;
        return -1;
    }

    return 0;
}

int
seaf_db_trans_query (SeafDBTrans *trans, const char *sql, int n, ...)
{
    int ret;
    va_list args;
    va_start (args, n);
    ret = db_ops.execute_sql (trans->conn, sql, n, args, NULL);
    va_end (args);
    if (ret < 0)
        trans->need_close = TRUE;

    return ret;
}

gboolean
seaf_db_trans_check_for_existence (SeafDBTrans *trans,
                                   const char *sql,
                                   gboolean *db_err,
                                   int n, ...)
{
    int n_rows;
    va_list args;
    va_start (args, n);
    n_rows = db_ops.query_foreach_row (trans->conn, sql, NULL, NULL, n, args,
                                       NULL);
    va_end (args);
    if (n_rows < 0) {
        trans->need_close = TRUE;
        *db_err = TRUE;
        return FALSE;
    } else {
        *db_err = FALSE;
        return (n_rows != 0);
    }
}

int
seaf_db_trans_foreach_selected_row (SeafDBTrans *trans, const char *sql,
                                    SeafDBRowFunc callback, void *data,
                                    int n, ...)
{
    int ret;
    va_list args;
    va_start (args, n);
    ret = db_ops.query_foreach_row (trans->conn, sql, callback, data, n, args,
                                    NULL);
    va_end (args);
    if (ret < 0)
        trans->need_close = TRUE;

    return ret;
}

int
seaf_db_row_get_column_count (SeafDBRow *row)
{
    return db_ops.row_get_column_count(row);
}

#ifdef HAVE_MYSQL

/* MySQL DB */

typedef struct MySQLDB {
    struct SeafDB parent;
    char *host;
    char *user;
    char *password;
    unsigned int port;
    char *db_name;
    char *unix_socket;
    gboolean use_ssl;
    gboolean skip_verify;
    char *ca_path;
    char *charset;
} MySQLDB;

typedef struct MySQLDBConnection {
    struct DBConnection parent;
    MYSQL *db_conn;
} MySQLDBConnection;

static gboolean
mysql_db_connection_ping (DBConnection *vconn)
{
    MySQLDBConnection *conn = (MySQLDBConnection *)vconn;

    return (mysql_ping (conn->db_conn) == 0);
}

static SeafDB *
mysql_db_new (const char *host,
              int port,
              const char *user,
              const char *password,
              const char *db_name,
              const char *unix_socket,
              gboolean use_ssl,
              gboolean skip_verify,
              const char *ca_path,
              const char *charset)
{
    MySQLDB *db = g_new0 (MySQLDB, 1);

    db->host = g_strdup (host);
    db->user = g_strdup
    (user);
    db->password = g_strdup (password);
    db->port = port;
    db->db_name = g_strdup(db_name);
    db->unix_socket = g_strdup(unix_socket);
    db->use_ssl = use_ssl;
    db->skip_verify = skip_verify;
    db->ca_path = g_strdup(ca_path);
    db->charset = g_strdup(charset);

    mysql_library_init (0, NULL, NULL);

    return (SeafDB *)db;
}

/* MariaDB's client library has no my_bool; provide a compatible alias. */
typedef char my_bool;

/* Open a new MySQL connection configured from the MySQLDB settings
 * (SSL mode, charset, unix socket, connect/read/write timeouts). */
static DBConnection *
mysql_db_get_connection (SeafDB *vdb)
{
    MySQLDB *db = (MySQLDB *)vdb;
    int conn_timeout = 1;
    int read_write_timeout = 60;
    MYSQL *db_conn;
    MySQLDBConnection *conn = NULL;
    int ssl_mode;

    db_conn = mysql_init (NULL);
    if (!db_conn) {
        seaf_warning ("Failed to init mysql connection object.\n");
        return NULL;
    }

    if (db->use_ssl && !db->skip_verify) {
#ifndef LIBMARIADB
        // Set ssl_mode to SSL_MODE_VERIFY_IDENTITY to verify server cert.
        // When ssl_mode is set to SSL_MODE_VERIFY_IDENTITY, MYSQL_OPT_SSL_CA is required to verify server cert.
        // Refer to: https://dev.mysql.com/doc/c-api/5.7/en/mysql-options.html
        ssl_mode = SSL_MODE_VERIFY_IDENTITY;
        mysql_options(db_conn, MYSQL_OPT_SSL_MODE, &ssl_mode);
        mysql_options(db_conn, MYSQL_OPT_SSL_CA, db->ca_path);
#else
        static my_bool verify= 1;
        mysql_optionsv(db_conn, MYSQL_OPT_SSL_VERIFY_SERVER_CERT, (void *)&verify);
        mysql_options(db_conn, MYSQL_OPT_SSL_CA, db->ca_path);
#endif
    } else if (db->use_ssl && db->skip_verify) {
#ifndef LIBMARIADB
        // Set ssl_mode to SSL_MODE_PREFERRED to skip verify server cert.
ssl_mode = SSL_MODE_PREFERRED; mysql_options(db_conn, MYSQL_OPT_SSL_MODE, &ssl_mode); #else static my_bool verify= 0; mysql_optionsv(db_conn, MYSQL_OPT_SSL_VERIFY_SERVER_CERT, (void *)&verify); #endif } else { #ifdef LIBMARIADB static my_bool verify= 0; mysql_optionsv(db_conn, MYSQL_OPT_SSL_VERIFY_SERVER_CERT, (void *)&verify); #endif } if (db->charset) mysql_options(db_conn, MYSQL_SET_CHARSET_NAME, db->charset); if (db->unix_socket) { int pro_type = MYSQL_PROTOCOL_SOCKET; mysql_options (db_conn, MYSQL_OPT_PROTOCOL, &pro_type); if (!db->user) { #ifndef LIBMARIADB mysql_options (db_conn, MYSQL_DEFAULT_AUTH, "unix_socket"); #else mysql_options (db_conn, MARIADB_OPT_UNIXSOCKET, (void *)db->unix_socket); #endif } } mysql_options(db_conn, MYSQL_OPT_CONNECT_TIMEOUT, (const char*)&conn_timeout); mysql_options(db_conn, MYSQL_OPT_READ_TIMEOUT, (const char*)&read_write_timeout); mysql_options(db_conn, MYSQL_OPT_WRITE_TIMEOUT, (const char*)&read_write_timeout); if (!mysql_real_connect(db_conn, db->host, db->user, db->password, db->db_name, db->port, db->unix_socket, CLIENT_MULTI_STATEMENTS)) { seaf_warning ("Failed to connect to MySQL: %s\n", mysql_error(db_conn)); mysql_close (db_conn); return NULL; } conn = g_new0 (MySQLDBConnection, 1); conn->db_conn = db_conn; return (DBConnection *)conn; } static void mysql_db_release_connection (DBConnection *vconn) { if (!vconn) return; MySQLDBConnection *conn = (MySQLDBConnection *)vconn; mysql_close (conn->db_conn); g_free (conn); } static int mysql_db_execute_sql_no_stmt (DBConnection *vconn, const char *sql, gboolean *retry) { MySQLDBConnection *conn = (MySQLDBConnection *)vconn; int rc; rc = mysql_query (conn->db_conn, sql); if (rc == 0) { return 0; } if (rc == CR_SERVER_GONE_ERROR || rc == CR_SERVER_LOST) { if (retry) *retry = TRUE; } seaf_warning ("Failed to execute sql %s: %s\n", sql, mysql_error(conn->db_conn)); return -1; } static MYSQL_STMT * _prepare_stmt_mysql (MYSQL *db, const char *sql, gboolean *retry) { MYSQL_STMT 
*stmt;

    stmt = mysql_stmt_init (db);
    if (!stmt) {
        seaf_warning ("mysql_stmt_init failed.\n");
        return NULL;
    }

    if (mysql_stmt_prepare (stmt, sql, strlen(sql)) != 0) {
        int err_code = mysql_stmt_errno (stmt);
        /* Lost connection: tell the caller to reconnect and retry. */
        if (err_code == CR_SERVER_GONE_ERROR || err_code == CR_SERVER_LOST) {
            if (retry)
                *retry = TRUE;
        }
        seaf_warning ("Failed to prepare sql %s: %s\n", sql, mysql_stmt_error(stmt));
        mysql_stmt_close (stmt);
        return NULL;
    }

    return stmt;
}

/* Bind `n` (type-string, value) vararg pairs into `params`.  Supported
 * types: "int", "int64", "string" (NULL strings bind SQL NULL).  The
 * buffers are heap-allocated; the caller frees params[i].buffer and
 * params[i].length after executing the statement. */
static int
_bind_params_mysql (MYSQL_STMT *stmt, MYSQL_BIND *params, int n, va_list args)
{
    int i;
    const char *type;

    for (i = 0; i < n; ++i) {
        type = va_arg (args, const char *);
        if (strcmp(type, "int") == 0) {
            int x = va_arg (args, int);
            int *pval = g_new (int, 1);
            *pval = x;
            params[i].buffer_type = MYSQL_TYPE_LONG;
            params[i].buffer = pval;
            params[i].is_null = 0;
        } else if (strcmp (type, "int64") == 0) {
            gint64 x = va_arg (args, gint64);
            gint64 *pval = g_new (gint64, 1);
            *pval = x;
            params[i].buffer_type = MYSQL_TYPE_LONGLONG;
            params[i].buffer = pval;
            params[i].is_null = 0;
        } else if (strcmp (type, "string") == 0) {
            const char *s = va_arg (args, const char *);
            static my_bool yes = TRUE;
            params[i].buffer_type = MYSQL_TYPE_STRING;
            params[i].buffer = g_strdup(s);
            unsigned long *plen = g_new (unsigned long, 1);
            params[i].length = plen;
            if (!s) {
                *plen = 0;
                params[i].buffer_length = 0;
                params[i].is_null = &yes;
            } else {
                *plen = strlen(s);
                params[i].buffer_length = *plen + 1;
                params[i].is_null = 0;
            }
        } else {
            seaf_warning ("BUG: invalid prep stmt parameter type %s.\n", type);
            g_return_val_if_reached (-1);
        }
    }

    if (mysql_stmt_bind_param (stmt, params) != 0) {
        return -1;
    }

    return 0;
}

/* Prepare, bind and execute a parameterized statement.  Sets *retry on
 * lost-connection errors; frees all bind buffers before returning. */
static int
mysql_db_execute_sql (DBConnection *vconn, const char *sql,
                      int n, va_list args, gboolean *retry)
{
    MySQLDBConnection *conn = (MySQLDBConnection *)vconn;
    MYSQL *db = conn->db_conn;
    MYSQL_STMT *stmt = NULL;
    MYSQL_BIND *params = NULL;
    int ret = 0;

    stmt = _prepare_stmt_mysql (db, sql, retry);
    if (!stmt) {
        return -1;
    }

    if (n > 0) {
        params = g_new0 (MYSQL_BIND,
                         n);
        if (_bind_params_mysql (stmt, params, n, args) < 0) {
            seaf_warning ("Failed to bind parameters for %s: %s.\n",
                          sql, mysql_stmt_error(stmt));
            ret = -1;
            goto out;
        }
    }

    if (mysql_stmt_execute (stmt) != 0) {
        seaf_warning ("Failed to execute sql %s: %s\n", sql, mysql_stmt_error(stmt));
        ret = -1;
        goto out;
    }

out:
    if (ret < 0) {
        int err_code = mysql_stmt_errno (stmt);
        if (err_code == CR_SERVER_GONE_ERROR || err_code == CR_SERVER_LOST) {
            if (retry)
                *retry = TRUE;
        }
    }
    if (stmt)
        mysql_stmt_close (stmt);
    if (params) {
        int i;
        for (i = 0; i < n; ++i) {
            g_free (params[i].buffer);
            g_free (params[i].length);
        }
        g_free (params);
    }

    return ret;
}

/* One result row of a prepared-statement query.  Column values are fetched
 * as strings into fixed-size buffers; oversized columns are re-fetched on
 * demand through new_binds (see mysql_db_row_get_column_string). */
typedef struct MySQLDBRow {
    SeafDBRow parent;
    int column_count;
    MYSQL_STMT *stmt;
    MYSQL_BIND *results;
    /* Used when returned columns are truncated. */
    MYSQL_BIND *new_binds;
} MySQLDBRow;

#define DEFAULT_MYSQL_COLUMN_SIZE 1024

/* Run a parameterized query and invoke `callback` once per row (a FALSE
 * return stops iteration).  Returns the number of rows fetched, or -1;
 * sets *retry on lost-connection errors before any row was fetched. */
static int
mysql_db_query_foreach_row (DBConnection *vconn, const char *sql,
                            SeafDBRowFunc callback, void *data,
                            int n, va_list args, gboolean *retry)
{
    MySQLDBConnection *conn = (MySQLDBConnection *)vconn;
    MYSQL *db = conn->db_conn;
    MYSQL_STMT *stmt = NULL;
    MYSQL_BIND *params = NULL;
    MySQLDBRow row;
    int err_code;
    int nrows = 0;
    int i;

    memset (&row, 0, sizeof(row));

    stmt = _prepare_stmt_mysql (db, sql, retry);
    if (!stmt) {
        return -1;
    }

    if (n > 0) {
        params = g_new0 (MYSQL_BIND, n);
        if (_bind_params_mysql (stmt, params, n, args) < 0) {
            nrows = -1;
            err_code = mysql_stmt_errno (stmt);
            if (err_code == CR_SERVER_GONE_ERROR || err_code == CR_SERVER_LOST) {
                if (retry)
                    *retry = TRUE;
            }
            goto out;
        }
    }

    if (mysql_stmt_execute (stmt) != 0) {
        seaf_warning ("Failed to execute sql %s: %s\n", sql, mysql_stmt_error(stmt));
        nrows = -1;
        err_code = mysql_stmt_errno (stmt);
        if (err_code == CR_SERVER_GONE_ERROR || err_code == CR_SERVER_LOST) {
            if (retry)
                *retry = TRUE;
        }
        goto out;
    }

    row.column_count = mysql_stmt_field_count (stmt);
    row.stmt = stmt;
    row.results = g_new0 (MYSQL_BIND, row.column_count);
    for (i = 0; i < row.column_count; ++i) {
        row.results[i].buffer = g_malloc (DEFAULT_MYSQL_COLUMN_SIZE + 1);
        /* Ask MySQL to convert fields to string, to avoid the trouble of
         * checking field types.
         */
        row.results[i].buffer_type = MYSQL_TYPE_STRING;
        row.results[i].buffer_length = DEFAULT_MYSQL_COLUMN_SIZE;
        row.results[i].length = g_new0 (unsigned long, 1);
        row.results[i].is_null = g_new0 (my_bool, 1);
    }
    row.new_binds = g_new0 (MYSQL_BIND, row.column_count);

    if (mysql_stmt_bind_result (stmt, row.results) != 0) {
        seaf_warning ("Failed to bind result for sql %s: %s\n", sql, mysql_stmt_error(stmt));
        nrows = -1;
        err_code = mysql_stmt_errno (stmt);
        if (err_code == CR_SERVER_GONE_ERROR || err_code == CR_SERVER_LOST) {
            if (retry)
                *retry = TRUE;
        }
        goto out;
    }

    int rc;
    gboolean next_row = TRUE;
    while (1) {
        rc = mysql_stmt_fetch (stmt);
        if (rc == 1) {
            seaf_warning ("Failed to fetch result for sql %s: %s\n", sql, mysql_stmt_error(stmt));
            nrows = -1;
            // Don't need to retry, some rows may have been fetched.
            goto out;
        }
        if (rc == MYSQL_NO_DATA)
            break;

        /* rc == 0 or rc == MYSQL_DATA_TRUNCATED */
        ++nrows;
        if (callback)
            next_row = callback ((SeafDBRow *)&row, data);

        /* Release any per-row truncation buffers allocated by
         * mysql_db_row_get_column_string during the callback. */
        for (i = 0; i < row.column_count; ++i) {
            g_free (row.new_binds[i].buffer);
            g_free (row.new_binds[i].length);
            g_free (row.new_binds[i].is_null);
            memset (&row.new_binds[i], 0, sizeof(MYSQL_BIND));
        }

        if (!next_row)
            break;
    }

out:
    if (stmt) {
        mysql_stmt_free_result (stmt);
        mysql_stmt_close (stmt);
    }
    if (params) {
        for (i = 0; i < n; ++i) {
            g_free (params[i].buffer);
            g_free (params[i].length);
        }
        g_free (params);
    }
    if (row.results) {
        for (i = 0; i < row.column_count; ++i) {
            g_free (row.results[i].buffer);
            g_free (row.results[i].length);
            g_free (row.results[i].is_null);
        }
        g_free (row.results);
    }
    if (row.new_binds) {
        for (i = 0; i < row.column_count; ++i) {
            g_free (row.new_binds[i].buffer);
            g_free (row.new_binds[i].length);
            g_free (row.new_binds[i].is_null);
        }
        g_free (row.new_binds);
    }

    return nrows;
}

static int
mysql_db_row_get_column_count (SeafDBRow *vrow)
{
    MySQLDBRow
*row = (MySQLDBRow *)vrow; return row->column_count; } static const char * mysql_db_row_get_column_string (SeafDBRow *vrow, int i) { MySQLDBRow *row = (MySQLDBRow *)vrow; if (*(row->results[i].is_null)) { return NULL; } char *ret = NULL; unsigned long real_length = *(row->results[i].length); /* If column size is larger then allocated buffer size, re-allocate a new buffer * and fetch the column directly. */ if (real_length > row->results[i].buffer_length) { row->new_binds[i].buffer = g_malloc (real_length + 1); row->new_binds[i].buffer_type = MYSQL_TYPE_STRING; row->new_binds[i].buffer_length = real_length; row->new_binds[i].length = g_new0 (unsigned long, 1); row->new_binds[i].is_null = g_new0 (my_bool, 1); if (mysql_stmt_fetch_column (row->stmt, &row->new_binds[i], i, 0) != 0) { seaf_warning ("Faield to fetch column: %s\n", mysql_stmt_error(row->stmt)); return NULL; } ret = row->new_binds[i].buffer; } else { ret = row->results[i].buffer; } ret[real_length] = 0; return ret; } static int mysql_db_row_get_column_int (SeafDBRow *vrow, int idx) { const char *str; char *e; int ret; str = mysql_db_row_get_column_string (vrow, idx); if (!str) { return 0; } errno = 0; ret = strtol (str, &e, 10); if (errno || (e == str)) { seaf_warning ("Number conversion failed.\n"); return -1; } return ret; } static gint64 mysql_db_row_get_column_int64 (SeafDBRow *vrow, int idx) { const char *str; char *e; gint64 ret; str = mysql_db_row_get_column_string (vrow, idx); if (!str) { return 0; } errno = 0; ret = strtoll (str, &e, 10); if (errno || (e == str)) { seaf_warning ("Number conversion failed.\n"); return -1; } return ret; } #endif /* HAVE_MYSQL */ /* SQLite DB */ /* SQLite thread synchronization rountines. 
* See https://www.sqlite.org/unlock_notify.html */ typedef struct UnlockNotification { int fired; pthread_cond_t cond; pthread_mutex_t mutex; } UnlockNotification; static void unlock_notify_cb(void **ap_arg, int n_arg) { int i; for (i = 0; i < n_arg; i++) { UnlockNotification *p = (UnlockNotification *)ap_arg[i]; pthread_mutex_lock (&p->mutex); p->fired = 1; pthread_cond_signal (&p->cond); pthread_mutex_unlock (&p->mutex); } } static int wait_for_unlock_notify(sqlite3 *db) { UnlockNotification un; un.fired = 0; pthread_mutex_init (&un.mutex, NULL); pthread_cond_init (&un.cond, NULL); int rc = sqlite3_unlock_notify(db, unlock_notify_cb, (void *)&un); if (rc == SQLITE_OK) { pthread_mutex_lock(&un.mutex); if (!un.fired) pthread_cond_wait (&un.cond, &un.mutex); pthread_mutex_unlock(&un.mutex); } pthread_cond_destroy (&un.cond); pthread_mutex_destroy (&un.mutex); return rc; } static int sqlite3_blocking_step(sqlite3_stmt *stmt) { int rc; while (SQLITE_LOCKED == (rc = sqlite3_step(stmt))) { rc = wait_for_unlock_notify(sqlite3_db_handle(stmt)); if (rc != SQLITE_OK) break; sqlite3_reset(stmt); } return rc; } static int sqlite3_blocking_prepare_v2(sqlite3 *db, const char *sql, int sql_len, sqlite3_stmt **pstmt, const char **pz) { int rc; while (SQLITE_LOCKED == (rc = sqlite3_prepare_v2(db, sql, sql_len, pstmt, pz))) { rc = wait_for_unlock_notify(db); if (rc != SQLITE_OK) break; } return rc; } static int sqlite3_blocking_exec(sqlite3 *db, const char *sql, int (*callback)(void *, int, char **, char **), void *arg, char **errmsg) { int rc; while (SQLITE_LOCKED == (rc = sqlite3_exec(db, sql, callback, arg, errmsg))) { rc = wait_for_unlock_notify(db); if (rc != SQLITE_OK) break; } return rc; } typedef struct SQLiteDB { SeafDB parent; char *db_path; } SQLiteDB; typedef struct SQLiteDBConnection { DBConnection parent; sqlite3 *db_conn; } SQLiteDBConnection; static SeafDB * sqlite_db_new (const char *db_path) { SQLiteDB *db = g_new0 (SQLiteDB, 1); db->db_path = g_strdup(db_path); 
return (SeafDB *)db; } static DBConnection * sqlite_db_get_connection (SeafDB *vdb) { SQLiteDB *db = (SQLiteDB *)vdb; sqlite3 *db_conn; int result; const char *errmsg; SQLiteDBConnection *conn; result = sqlite3_open_v2 (db->db_path, &db_conn, SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_SHAREDCACHE, NULL); if (result != SQLITE_OK) { errmsg = sqlite3_errmsg(db_conn); seaf_warning ("Failed to open sqlite db: %s\n", errmsg ? errmsg : "no error given"); return NULL; } conn = g_new0 (SQLiteDBConnection, 1); conn->db_conn = db_conn; return (DBConnection *)conn; } static void sqlite_db_release_connection (DBConnection *vconn, gboolean need_close) { if (!vconn) return; SQLiteDBConnection *conn = (SQLiteDBConnection *)vconn; sqlite3_close (conn->db_conn); g_free (conn); } static int sqlite_db_execute_sql_no_stmt (DBConnection *vconn, const char *sql, gboolean *retry) { SQLiteDBConnection *conn = (SQLiteDBConnection *)vconn; char *errmsg = NULL; int rc; rc = sqlite3_blocking_exec (conn->db_conn, sql, NULL, NULL, &errmsg); if (rc != SQLITE_OK) { seaf_warning ("sqlite3_exec failed %s: %s", sql, errmsg ? 
errmsg : "no error given"); if (errmsg) sqlite3_free (errmsg); return -1; } return 0; } static int _bind_parameters_sqlite (sqlite3 *db, sqlite3_stmt *stmt, int n, va_list args) { int i; const char *type; for (i = 0; i < n; ++i) { type = va_arg (args, const char *); if (strcmp(type, "int") == 0) { int x = va_arg (args, int); if (sqlite3_bind_int (stmt, i+1, x) != SQLITE_OK) { seaf_warning ("sqlite3_bind_int failed: %s\n", sqlite3_errmsg(db)); return -1; } } else if (strcmp (type, "int64") == 0) { gint64 x = va_arg (args, gint64); if (sqlite3_bind_int64 (stmt, i+1, x) != SQLITE_OK) { seaf_warning ("sqlite3_bind_int64 failed: %s\n", sqlite3_errmsg(db)); return -1; } } else if (strcmp (type, "string") == 0) { const char *s = va_arg (args, const char *); if (sqlite3_bind_text (stmt, i+1, s, -1, SQLITE_TRANSIENT) != SQLITE_OK) { seaf_warning ("sqlite3_bind_text failed: %s\n", sqlite3_errmsg(db)); return -1; } } else { seaf_warning ("BUG: invalid prep stmt parameter type %s.\n", type); g_return_val_if_reached (-1); } } return 0; } static int sqlite_db_execute_sql (DBConnection *vconn, const char *sql, int n, va_list args, gboolean *retry) { SQLiteDBConnection *conn = (SQLiteDBConnection *)vconn; sqlite3 *db = conn->db_conn; sqlite3_stmt *stmt; int rc; int ret = 0; rc = sqlite3_blocking_prepare_v2 (db, sql, -1, &stmt, NULL); if (rc != SQLITE_OK) { seaf_warning ("sqlite3_prepare_v2 failed %s: %s", sql, sqlite3_errmsg(db)); return -1; } if (_bind_parameters_sqlite (db, stmt, n, args) < 0) { seaf_warning ("Failed to bind parameters for sql %s\n", sql); ret = -1; goto out; } rc = sqlite3_blocking_step (stmt); if (rc != SQLITE_DONE) { seaf_warning ("sqlite3_step failed %s: %s", sql, sqlite3_errmsg(db)); ret = -1; goto out; } out: sqlite3_finalize (stmt); return ret; } typedef struct SQLiteDBRow { SeafDBRow parent; int column_count; sqlite3 *db; sqlite3_stmt *stmt; } SQLiteDBRow; static int sqlite_db_query_foreach_row (DBConnection *vconn, const char *sql, SeafDBRowFunc 
callback, void *data, int n, va_list args, gboolean *retry) { SQLiteDBConnection *conn = (SQLiteDBConnection *)vconn; sqlite3 *db = conn->db_conn; sqlite3_stmt *stmt; int rc; int nrows = 0; rc = sqlite3_blocking_prepare_v2 (db, sql, -1, &stmt, NULL); if (rc != SQLITE_OK) { seaf_warning ("sqlite3_prepare_v2 failed %s: %s", sql, sqlite3_errmsg(db)); return -1; } if (_bind_parameters_sqlite (db, stmt, n, args) < 0) { seaf_warning ("Failed to bind parameters for sql %s\n", sql); nrows = -1; goto out; } SQLiteDBRow row; memset (&row, 0, sizeof(row)); row.db = db; row.stmt = stmt; row.column_count = sqlite3_column_count (stmt); while (1) { rc = sqlite3_blocking_step (stmt); if (rc == SQLITE_ROW) { ++nrows; if (callback && !callback ((SeafDBRow *)&row, data)) break; } else if (rc == SQLITE_DONE) { break; } else { seaf_warning ("sqlite3_step failed %s: %s\n", sql, sqlite3_errmsg(db)); nrows = -1; goto out; } } out: sqlite3_finalize (stmt); return nrows; } static int sqlite_db_row_get_column_count (SeafDBRow *vrow) { SQLiteDBRow *row = (SQLiteDBRow *)vrow; return row->column_count; } static const char * sqlite_db_row_get_column_string (SeafDBRow *vrow, int idx) { SQLiteDBRow *row = (SQLiteDBRow *)vrow; return (const char *)sqlite3_column_text (row->stmt, idx); } static int sqlite_db_row_get_column_int (SeafDBRow *vrow, int idx) { SQLiteDBRow *row = (SQLiteDBRow *)vrow; return sqlite3_column_int (row->stmt, idx); } static gint64 sqlite_db_row_get_column_int64 (SeafDBRow *vrow, int idx) { SQLiteDBRow *row = (SQLiteDBRow *)vrow; return sqlite3_column_int64 (row->stmt, idx); } ================================================ FILE: common/seaf-db.h ================================================ #ifndef SEAF_DB_H #define SEAF_DB_H enum { SEAF_DB_TYPE_SQLITE, SEAF_DB_TYPE_MYSQL, SEAF_DB_TYPE_PGSQL, }; typedef struct SeafDB SeafDB; typedef struct SeafDB CcnetDB; typedef struct SeafDBRow SeafDBRow; typedef struct SeafDBRow CcnetDBRow; typedef struct SeafDBTrans SeafDBTrans; 
typedef struct SeafDBTrans CcnetDBTrans; typedef gboolean (*SeafDBRowFunc) (SeafDBRow *, void *); typedef gboolean (*CcnetDBRowFunc) (CcnetDBRow *, void *); SeafDB * seaf_db_new_mysql (const char *host, int port, const char *user, const char *passwd, const char *db, const char *unix_socket, gboolean use_ssl, gboolean skip_verify, const char *ca_path, const char *charset, int max_connections); #if 0 SeafDB * seaf_db_new_pgsql (const char *host, unsigned int port, const char *user, const char *passwd, const char *db_name, const char *unix_socket, int max_connections); #endif SeafDB * seaf_db_new_sqlite (const char *db_path, int max_connections); int seaf_db_type (SeafDB *db); int seaf_db_query (SeafDB *db, const char *sql); gboolean seaf_db_check_for_existence (SeafDB *db, const char *sql, gboolean *db_err); int seaf_db_foreach_selected_row (SeafDB *db, const char *sql, SeafDBRowFunc callback, void *data); const char * seaf_db_row_get_column_text (SeafDBRow *row, guint32 idx); int seaf_db_row_get_column_int (SeafDBRow *row, guint32 idx); gint64 seaf_db_row_get_column_int64 (SeafDBRow *row, guint32 idx); int seaf_db_get_int (SeafDB *db, const char *sql); gint64 seaf_db_get_int64 (SeafDB *db, const char *sql); char * seaf_db_get_string (SeafDB *db, const char *sql); /* Transaction related */ SeafDBTrans * seaf_db_begin_transaction (SeafDB *db); void seaf_db_trans_close (SeafDBTrans *trans); int seaf_db_commit (SeafDBTrans *trans); int seaf_db_rollback (SeafDBTrans *trans); int seaf_db_trans_query (SeafDBTrans *trans, const char *sql, int n, ...); gboolean seaf_db_trans_check_for_existence (SeafDBTrans *trans, const char *sql, gboolean *db_err, int n, ...); int seaf_db_trans_foreach_selected_row (SeafDBTrans *trans, const char *sql, SeafDBRowFunc callback, void *data, int n, ...); int seaf_db_row_get_column_count (SeafDBRow *row); /* Prepared Statements */ int seaf_db_statement_query (SeafDB *db, const char *sql, int n, ...); gboolean seaf_db_statement_exists (SeafDB 
*db, const char *sql, gboolean *db_err, int n, ...); int seaf_db_statement_foreach_row (SeafDB *db, const char *sql, SeafDBRowFunc callback, void *data, int n, ...); int seaf_db_statement_get_int (SeafDB *db, const char *sql, int n, ...); gint64 seaf_db_statement_get_int64 (SeafDB *db, const char *sql, int n, ...); char * seaf_db_statement_get_string (SeafDB *db, const char *sql, int n, ...); #endif ================================================ FILE: common/seaf-utils.c ================================================ #include "common.h" #include "log.h" #include "seafile-session.h" #include "seaf-utils.h" #include "seaf-db.h" #include "utils.h" #include #include #include #include #define JWT_TOKEN_EXPIRE_TIME 3*24*3600 /* 3 days*/ char * seafile_session_get_tmp_file_path (SeafileSession *session, const char *basename, char path[]) { int path_len; path_len = strlen (session->tmp_file_dir); memcpy (path, session->tmp_file_dir, path_len + 1); path[path_len] = '/'; strcpy (path + path_len + 1, basename); return path; } #define DEFAULT_MAX_CONNECTIONS 100 #define SQLITE_DB_NAME "seafile.db" #define CCNET_DB "ccnet.db" static int sqlite_db_start (SeafileSession *session) { char *db_path; int max_connections = 0; max_connections = g_key_file_get_integer (session->config, "database", "max_connections", NULL); if (max_connections <= 0) max_connections = DEFAULT_MAX_CONNECTIONS; db_path = g_build_filename (session->seaf_dir, SQLITE_DB_NAME, NULL); session->db = seaf_db_new_sqlite (db_path, max_connections); if (!session->db) { seaf_warning ("Failed to start sqlite db.\n"); return -1; } return 0; } #ifdef HAVE_MYSQL #define MYSQL_DEFAULT_PORT 3306 typedef struct DBOption { char *user; char *passwd; char *host; char *ca_path; char *charset; char *ccnet_db_name; char *seafile_db_name; gboolean use_ssl; gboolean skip_verify; int port; int max_connections; } DBOption; static void db_option_free (DBOption *option) { if (!option) return; g_free (option->user); g_free 
(option->passwd); g_free (option->host); g_free (option->ca_path); g_free (option->charset); g_free (option->ccnet_db_name); g_free (option->seafile_db_name); g_free (option); } static int load_db_option_from_env (DBOption *option) { const char *env_user, *env_passwd, *env_host, *env_ccnet_db, *env_seafile_db, *env_port; env_user = g_getenv("SEAFILE_MYSQL_DB_USER"); env_passwd = g_getenv("SEAFILE_MYSQL_DB_PASSWORD"); env_host = g_getenv("SEAFILE_MYSQL_DB_HOST"); env_port = g_getenv("SEAFILE_MYSQL_DB_PORT"); env_ccnet_db = g_getenv("SEAFILE_MYSQL_DB_CCNET_DB_NAME"); env_seafile_db = g_getenv("SEAFILE_MYSQL_DB_SEAFILE_DB_NAME"); if (env_user && g_strcmp0 (env_user, "") != 0) { g_free (option->user); option->user = g_strdup (env_user); } if (env_passwd && g_strcmp0 (env_passwd, "") != 0) { g_free (option->passwd); option->passwd = g_strdup (env_passwd); } if (env_host && g_strcmp0 (env_host, "") != 0) { g_free (option->host); option->host = g_strdup (env_host); } if (env_port && g_strcmp0(env_port, "") != 0) { int port = atoi(env_port); if (port > 0) { option->port = port; } } if (env_ccnet_db && g_strcmp0 (env_ccnet_db, "") != 0) { g_free (option->ccnet_db_name); option->ccnet_db_name = g_strdup (env_ccnet_db); } else if (!option->ccnet_db_name) { option->ccnet_db_name = g_strdup ("ccnet_db"); seaf_message ("Failed to read SEAFILE_MYSQL_DB_CCNET_DB_NAME, use ccnet_db by default\n"); } if (env_seafile_db && g_strcmp0 (env_seafile_db, "") != 0) { g_free (option->seafile_db_name); option->seafile_db_name = g_strdup (env_seafile_db); } else if (!option->seafile_db_name) { option->seafile_db_name = g_strdup ("seafile_db"); seaf_message ("Failed to read SEAFILE_MYSQL_DB_SEAFILE_DB_NAME, use seafile_db by default\n"); } return 0; } static DBOption * load_db_option (SeafileSession *session) { GError *error = NULL; int ret = 0; DBOption *option = g_new0 (DBOption, 1); option->host = seaf_key_file_get_string (session->config, "database", "host", NULL); option->port = 
g_key_file_get_integer (session->config, "database", "port", &error); if (error) { g_clear_error (&error); option->port = MYSQL_DEFAULT_PORT; } option->user = seaf_key_file_get_string (session->config, "database", "user", NULL); option->passwd = seaf_key_file_get_string (session->config, "database", "password", NULL); option->seafile_db_name = seaf_key_file_get_string (session->config, "database", "db_name", NULL); option->use_ssl = g_key_file_get_boolean (session->config, "database", "use_ssl", NULL); option->skip_verify = g_key_file_get_boolean (session->config, "database", "skip_verify", NULL); if (option->use_ssl && !option->skip_verify) { option->ca_path = seaf_key_file_get_string (session->config, "database", "ca_path", NULL); if (!option->ca_path) { seaf_warning ("ca_path is required if use ssl and don't skip verify.\n"); ret = -1; goto out; } } option->charset = seaf_key_file_get_string (session->config, "database", "connection_charset", NULL); option->max_connections = g_key_file_get_integer (session->config, "database", "max_connections", &error); if (error || option->max_connections < 0) { if (error) g_clear_error (&error); option->max_connections = DEFAULT_MAX_CONNECTIONS; } load_db_option_from_env (option); if (!option->host) { seaf_warning ("DB host not set in config.\n"); ret = -1; goto out; } if (!option->user) { seaf_warning ("DB user not set in config.\n"); ret = -1; goto out; } if (!option->passwd) { seaf_warning ("DB passwd not set in config.\n"); ret = -1; goto out; } if (!option->ccnet_db_name) { seaf_warning ("ccnet_db_name not set in config.\n"); ret = -1; goto out; } if (!option->seafile_db_name) { seaf_warning ("db_name not set in config.\n"); ret = -1; goto out; } out: if (ret < 0) { db_option_free (option); return NULL; } return option; } static int mysql_db_start (SeafileSession *session) { DBOption *option = NULL; option = load_db_option (session); if (!option) { seaf_warning ("Failed to load database config.\n"); return -1; } 
session->db = seaf_db_new_mysql (option->host, option->port, option->user, option->passwd, option->seafile_db_name, NULL, option->use_ssl, option->skip_verify, option->ca_path, option->charset, option->max_connections); if (!session->db) { db_option_free (option); seaf_warning ("Failed to start mysql db.\n"); return -1; } db_option_free (option); return 0; } #endif #ifdef HAVE_POSTGRESQL static int pgsql_db_start (SeafileSession *session) { char *host, *user, *passwd, *db, *unix_socket; unsigned int port; GError *error = NULL; host = seaf_key_file_get_string (session->config, "database", "host", &error); if (!host) { seaf_warning ("DB host not set in config.\n"); return -1; } user = seaf_key_file_get_string (session->config, "database", "user", &error); if (!user) { seaf_warning ("DB user not set in config.\n"); return -1; } passwd = seaf_key_file_get_string (session->config, "database", "password", &error); if (!passwd) { seaf_warning ("DB passwd not set in config.\n"); return -1; } db = seaf_key_file_get_string (session->config, "database", "db_name", &error); if (!db) { seaf_warning ("DB name not set in config.\n"); return -1; } port = g_key_file_get_integer (session->config, "database", "port", &error); if (error) { port = 0; g_clear_error (&error); } unix_socket = seaf_key_file_get_string (session->config, "database", "unix_socket", &error); session->db = seaf_db_new_pgsql (host, port, user, passwd, db, unix_socket, DEFAULT_MAX_CONNECTIONS); if (!session->db) { seaf_warning ("Failed to start pgsql db.\n"); return -1; } g_free (host); g_free (user); g_free (passwd); g_free (db); g_free (unix_socket); return 0; } #endif int load_database_config (SeafileSession *session) { char *type; GError *error = NULL; int ret = 0; gboolean create_tables = FALSE; type = seaf_key_file_get_string (session->config, "database", "type", &error); /* Default to use mysql if not set. 
*/ if (type && strcasecmp (type, "sqlite") == 0) { ret = sqlite_db_start (session); } #ifdef HAVE_MYSQL else { ret = mysql_db_start (session); } #endif if (ret == 0) { if (g_key_file_has_key (session->config, "database", "create_tables", NULL)) create_tables = g_key_file_get_boolean (session->config, "database", "create_tables", NULL); session->create_tables = create_tables; } g_free (type); return ret; } static int ccnet_init_sqlite_database (SeafileSession *session) { char *db_path; db_path = g_build_path ("/", session->ccnet_dir, CCNET_DB, NULL); session->ccnet_db = seaf_db_new_sqlite (db_path, DEFAULT_MAX_CONNECTIONS); if (!session->ccnet_db) { seaf_warning ("Failed to open ccnet database.\n"); return -1; } return 0; } #ifdef HAVE_MYSQL static int ccnet_init_mysql_database (SeafileSession *session) { DBOption *option = NULL; option = load_db_option (session); if (!option) { seaf_warning ("Failed to load database config.\n"); return -1; } session->ccnet_db = seaf_db_new_mysql (option->host, option->port, option->user, option->passwd, option->ccnet_db_name, NULL, option->use_ssl, option->skip_verify, option->ca_path, option->charset, option->max_connections); if (!session->ccnet_db) { db_option_free (option); seaf_warning ("Failed to open ccnet database.\n"); return -1; } db_option_free (option); return 0; } #endif int load_ccnet_database_config (SeafileSession *session) { int ret; char *engine; gboolean create_tables = FALSE; engine = ccnet_key_file_get_string (session->config, "database", "type"); if (engine && strcasecmp (engine, "sqlite") == 0) { seaf_message ("Use database sqlite\n"); ret = ccnet_init_sqlite_database (session); } #ifdef HAVE_MYSQL else { seaf_message("Use database Mysql\n"); ret = ccnet_init_mysql_database (session); } #endif if (ret == 0) { if (g_key_file_has_key (session->config, "database", "create_tables", NULL)) create_tables = g_key_file_get_boolean (session->config, "database", "create_tables", NULL); session->ccnet_create_tables = 
create_tables; } g_free (engine); return ret; } #ifdef FULL_FEATURE char * seaf_gen_notif_server_jwt (const char *repo_id, const char *username) { char *jwt_token = NULL; gint64 now = (gint64)time(NULL); jwt_t *jwt = NULL; if (!seaf->notif_server_private_key) { seaf_warning ("No private key is configured for generating jwt token\n"); return NULL; } int ret = jwt_new (&jwt); if (ret != 0 || jwt == NULL) { seaf_warning ("Failed to create jwt\n"); goto out; } ret = jwt_add_grant (jwt, "repo_id", repo_id); if (ret != 0) { seaf_warning ("Failed to add repo_id to jwt\n"); goto out; } ret = jwt_add_grant (jwt, "username", username); if (ret != 0) { seaf_warning ("Failed to add username to jwt\n"); goto out; } ret = jwt_add_grant_int (jwt, "exp", now + JWT_TOKEN_EXPIRE_TIME); if (ret != 0) { seaf_warning ("Failed to expire time to jwt\n"); goto out; } ret = jwt_set_alg (jwt, JWT_ALG_HS256, (unsigned char *)seaf->notif_server_private_key, strlen(seaf->notif_server_private_key)); if (ret != 0) { seaf_warning ("Failed to set alg\n"); goto out; } jwt_token = jwt_encode_str (jwt); out: jwt_free (jwt); return jwt_token; } #endif char * seaf_parse_auth_token (const char *auth_token) { char *token = NULL; char **parts = NULL; if (!auth_token) { return NULL; } parts = g_strsplit (auth_token, " ", 2); if (!parts) { return NULL; } if (g_strv_length (parts) < 2) { g_strfreev (parts); return NULL; } token = g_strdup(parts[1]); g_strfreev (parts); return token; } void split_filename (const char *filename, char **name, char **ext) { char *dot; dot = strrchr (filename, '.'); if (dot) { *ext = g_strdup (dot + 1); *name = g_strndup (filename, dot - filename); } else { *name = g_strdup (filename); *ext = NULL; } } static gboolean collect_token_list (SeafDBRow *row, void *data) { GList **p_tokens = data; const char *token; token = seaf_db_row_get_column_text (row, 0); *p_tokens = g_list_prepend (*p_tokens, g_strdup(token)); return TRUE; } int seaf_delete_repo_tokens (SeafRepo *repo) { int ret 
= 0; const char *template; GList *token_list = NULL; GList *ptr; GString *token_list_str = g_string_new (""); GString *sql = g_string_new (""); int rc; template = "SELECT u.token FROM RepoUserToken as u WHERE u.repo_id=?"; rc = seaf_db_statement_foreach_row (seaf->db, template, collect_token_list, &token_list, 1, "string", repo->id); if (rc < 0) { goto out; } if (rc == 0) goto out; for (ptr = token_list; ptr; ptr = ptr->next) { const char *token = (char *)ptr->data; if (token_list_str->len == 0) g_string_append_printf (token_list_str, "'%s'", token); else g_string_append_printf (token_list_str, ",'%s'", token); } /* Note that there is a size limit on sql query. In MySQL it's 1MB by default. * Normally the token_list won't be that long. */ g_string_printf (sql, "DELETE FROM RepoUserToken WHERE token in (%s)", token_list_str->str); rc = seaf_db_statement_query (seaf->db, sql->str, 0); if (rc < 0) { goto out; } g_string_printf (sql, "DELETE FROM RepoTokenPeerInfo WHERE token in (%s)", token_list_str->str); rc = seaf_db_statement_query (seaf->db, sql->str, 0); if (rc < 0) { goto out; } out: g_string_free (token_list_str, TRUE); g_string_free (sql, TRUE); g_list_free_full (token_list, (GDestroyNotify)g_free); if (rc < 0) { ret = -1; } return ret; } ================================================ FILE: common/seaf-utils.h ================================================ #ifndef SEAF_UTILS_H #define SEAF_UTILS_H #include struct _SeafileSession; char * seafile_session_get_tmp_file_path (struct _SeafileSession *session, const char *basename, char path[]); int load_database_config (struct _SeafileSession *session); int load_ccnet_database_config (struct _SeafileSession *session); #ifdef FULL_FEATURE #endif char * seaf_gen_notif_server_jwt (const char *repo_id, const char *username); char * seaf_parse_auth_token (const char *auth_token); void split_filename (const char *filename, char **name, char **ext); int seaf_delete_repo_tokens (SeafRepo *repo); #endif 
================================================ FILE: common/seafile-crypt.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include #include #include "seafile-crypt.h" #include "password-hash.h" #include #include "utils.h" #include "log.h" /* The EVP_EncryptXXX and EVP_DecryptXXX series of functions have a weird choice of returned value. */ #define ENC_SUCCESS 1 #define ENC_FAILURE 0 #define DEC_SUCCESS 1 #define DEC_FAILURE 0 #define KEYGEN_ITERATION 1 << 19 #define KEYGEN_ITERATION2 1000 /* Should generate random salt for each repo. */ static unsigned char salt[8] = { 0xda, 0x90, 0x45, 0xc3, 0x06, 0xc7, 0xcc, 0x26 }; SeafileCrypt * seafile_crypt_new (int version, unsigned char *key, unsigned char *iv) { SeafileCrypt *crypt = g_new0 (SeafileCrypt, 1); crypt->version = version; if (version == 1) memcpy (crypt->key, key, 16); else memcpy (crypt->key, key, 32); memcpy (crypt->iv, iv, 16); return crypt; } int seafile_derive_key (const char *data_in, int in_len, int version, const char *repo_salt, unsigned char *key, unsigned char *iv) { if (version >= 3) { unsigned char repo_salt_bin[32]; hex_to_rawdata (repo_salt, repo_salt_bin, 32); PKCS5_PBKDF2_HMAC (data_in, in_len, repo_salt_bin, sizeof(repo_salt_bin), KEYGEN_ITERATION2, EVP_sha256(), 32, key); PKCS5_PBKDF2_HMAC ((char *)key, 32, repo_salt_bin, sizeof(repo_salt_bin), 10, EVP_sha256(), 16, iv); return 0; } else if (version == 2) { PKCS5_PBKDF2_HMAC (data_in, in_len, salt, sizeof(salt), KEYGEN_ITERATION2, EVP_sha256(), 32, key); PKCS5_PBKDF2_HMAC ((char *)key, 32, salt, sizeof(salt), 10, EVP_sha256(), 16, iv); return 0; } else if (version == 1) return EVP_BytesToKey (EVP_aes_128_cbc(), /* cipher mode */ EVP_sha1(), /* message digest */ salt, /* salt */ (unsigned char*)data_in, in_len, KEYGEN_ITERATION, /* iteration times */ key, /* the derived key */ iv); /* IV, initial vector */ else return EVP_BytesToKey (EVP_aes_128_ecb(), /* 
cipher mode */ EVP_sha1(), /* message digest */ NULL, /* salt */ (unsigned char*)data_in, in_len, 3, /* iteration times */ key, /* the derived key */ iv); /* IV, initial vector */ } int seafile_generate_repo_salt (char *repo_salt) { unsigned char repo_salt_bin[32]; int rc = RAND_bytes (repo_salt_bin, sizeof(repo_salt_bin)); if (rc != 1) { seaf_warning ("Failed to generate salt for repo encryption.\n"); return -1; } rawdata_to_hex (repo_salt_bin, repo_salt, 32); return 0; } int seafile_generate_random_key (const char *passwd, int version, const char *repo_salt, char *random_key) { SeafileCrypt *crypt; unsigned char secret_key[32], *rand_key; int outlen; unsigned char key[32], iv[16]; int rc = RAND_bytes (secret_key, sizeof(secret_key)); if (rc != 1) { seaf_warning ("Failed to generate secret key for repo encryption.\n"); return -1; } seafile_derive_key (passwd, strlen(passwd), version, repo_salt, key, iv); crypt = seafile_crypt_new (version, key, iv); seafile_encrypt ((char **)&rand_key, &outlen, (char *)secret_key, sizeof(secret_key), crypt); rawdata_to_hex (rand_key, random_key, 48); g_free (crypt); g_free (rand_key); return 0; } void seafile_generate_magic (int version, const char *repo_id, const char *passwd, const char *repo_salt, char *magic) { GString *buf = g_string_new (NULL); unsigned char key[32], iv[16]; /* Compute a "magic" string from repo_id and passwd. * This is used to verify the password given by user before decrypting * data. */ g_string_append_printf (buf, "%s%s", repo_id, passwd); seafile_derive_key (buf->str, buf->len, version, repo_salt, key, iv); g_string_free (buf, TRUE); rawdata_to_hex (key, magic, 32); } void seafile_generate_pwd_hash (int version, const char *repo_id, const char *passwd, const char *repo_salt, const char *algo, const char *params_str, char *pwd_hash) { GString *buf = g_string_new (NULL); unsigned char key[32]; /* Compute a "pwd_hash" string from repo_id and passwd. 
* This is used to verify the password given by user before decrypting
* data. */
    g_string_append_printf (buf, "%s%s", repo_id, passwd);
    if (version <= 2) {
        // use fixed repo salt
        /* Repos with enc_version <= 2 predate per-repo salts: hex-encode the
         * compiled-in 8-byte fixed salt (defined elsewhere in this file) and
         * use it for the derivation instead of @repo_salt. */
        char fixed_salt[64] = {0};
        rawdata_to_hex(salt, fixed_salt, 8);
        pwd_hash_derive_key (buf->str, buf->len, fixed_salt, algo, params_str, key);
    } else {
        pwd_hash_derive_key (buf->str, buf->len, repo_salt, algo, params_str, key);
    }
    g_string_free (buf, TRUE);
    /* Expose the 32-byte derived key as a 64-char hex string. */
    rawdata_to_hex (key, pwd_hash, 32);
}

/* Verify @passwd against the repo's stored @magic.
 * The magic is the hex form of the key derived from "repo_id + passwd",
 * so we simply re-derive and compare.
 * Returns 0 on match, -1 on mismatch or unsupported @version. */
int
seafile_verify_repo_passwd (const char *repo_id,
                            const char *passwd,
                            const char *magic,
                            int version,
                            const char *repo_salt)
{
    GString *buf = g_string_new (NULL);
    unsigned char key[32], iv[16];
    char hex[65];

    if (version != 1 && version != 2 && version != 3 && version != 4) {
        seaf_warning ("Unsupported enc_version %d.\n", version);
        return -1;
    }

    /* Recompute the magic and compare it with the one comes with the repo. */
    g_string_append_printf (buf, "%s%s", repo_id, passwd);

    seafile_derive_key (buf->str, buf->len, version, repo_salt, key, iv);

    g_string_free (buf, TRUE);

    /* enc_version 1 only derives a 16-byte key; later versions use 32 bytes. */
    if (version >= 2)
        rawdata_to_hex (key, hex, 32);
    else
        rawdata_to_hex (key, hex, 16);

    if (g_strcmp0 (hex, magic) == 0)
        return 0;
    else
        return -1;
}

/* Verify @passwd against a stored @pwd_hash.
 * Mirrors the generation path above: derive a key from "repo_id + passwd"
 * with @algo/@params_str (fixed salt for version <= 2, @repo_salt otherwise)
 * and compare the hex result. Returns 0 on match, -1 otherwise. */
int
seafile_pwd_hash_verify_repo_passwd (int version,
                                     const char *repo_id,
                                     const char *passwd,
                                     const char *repo_salt,
                                     const char *pwd_hash,
                                     const char *algo,
                                     const char *params_str)
{
    GString *buf = g_string_new (NULL);
    unsigned char key[32];
    char hex[65];

    g_string_append_printf (buf, "%s%s", repo_id, passwd);

    if (version <= 2) {
        // use fixed repo salt
        char fixed_salt[64] = {0};
        rawdata_to_hex(salt, fixed_salt, 8);
        pwd_hash_derive_key (buf->str, buf->len, fixed_salt, algo, params_str, key);
    } else {
        pwd_hash_derive_key (buf->str, buf->len, repo_salt, algo, params_str, key);
    }

    g_string_free (buf, TRUE);

    rawdata_to_hex (key, hex, 32);

    if (g_strcmp0 (hex, pwd_hash) == 0)
        return 0;
    else
        return -1;
}

/* Recover the repo's real data key/iv using @passwd.
 * enc_version 1: the password-derived 16-byte key/iv is used directly.
 * enc_version >= 2: @random_key is the hex-encoded, password-encrypted
 * 48-byte blob holding the repo's real 32-byte secret; decrypt it, then
 * re-derive key/iv from the plaintext secret.
 * Returns 0 on success, -1 on failure. */
int
seafile_decrypt_repo_enc_key (int enc_version,
                              const char *passwd, const char *random_key,
                              const char *repo_salt,
                              unsigned char *key_out, unsigned char *iv_out)
{
    unsigned char key[32], iv[16];

    seafile_derive_key (passwd, strlen(passwd), enc_version, repo_salt, key, iv);

    if (enc_version == 1) {
        memcpy (key_out, key, 16);
        memcpy (iv_out, iv, 16);
        return 0;
    } else if (enc_version >= 2) {
        unsigned char enc_random_key[48], *dec_random_key;
        int outlen;
        SeafileCrypt *crypt;

        if (random_key == NULL || random_key[0] == 0) {
            seaf_warning ("Empty random key.\n");
            return -1;
        }

        hex_to_rawdata (random_key, enc_random_key, 48);

        crypt = seafile_crypt_new (enc_version, key, iv);
        if (seafile_decrypt ((char **)&dec_random_key, &outlen,
                             (char *)enc_random_key, 48,
                             crypt) < 0) {
            seaf_warning ("Failed to decrypt random key.\n");
            g_free (crypt);
            return -1;
        }
        g_free (crypt);

        /* The decrypted 32-byte secret is itself run through key derivation
         * to produce the final data key and iv. */
        seafile_derive_key ((char *)dec_random_key, 32, enc_version,
                            repo_salt, key, iv);
        memcpy (key_out, key, 32);
        memcpy (iv_out, iv, 16);

        g_free (dec_random_key);
        return 0;
    }

    return -1;
}

/* Re-wrap the repo secret when the password changes: decrypt
 * @old_random_key with @old_passwd, then encrypt the recovered secret
 * with @new_passwd and hex-encode the result into @new_random_key
 * (96 hex chars). Returns 0 on success, -1 on failure. */
int
seafile_update_random_key (const char *old_passwd, const char *old_random_key,
                           const char *new_passwd, char *new_random_key,
                           int enc_version, const char *repo_salt)
{
    unsigned char key[32], iv[16];
    unsigned char random_key_raw[48], *secret_key, *new_random_key_raw;
    int secret_key_len, random_key_len;
    SeafileCrypt *crypt;

    /* First, use old_passwd to decrypt secret key from old_random_key. */
    seafile_derive_key (old_passwd, strlen(old_passwd), enc_version,
                        repo_salt, key, iv);

    hex_to_rawdata (old_random_key, random_key_raw, 48);

    crypt = seafile_crypt_new (enc_version, key, iv);
    if (seafile_decrypt ((char **)&secret_key, &secret_key_len,
                         (char *)random_key_raw, 48,
                         crypt) < 0) {
        seaf_warning ("Failed to decrypt random key.\n");
        g_free (crypt);
        return -1;
    }
    g_free (crypt);

    /* Second, use new_passwd to encrypt secret key.
*/
    seafile_derive_key (new_passwd, strlen(new_passwd), enc_version,
                        repo_salt, key, iv);

    crypt = seafile_crypt_new (enc_version, key, iv);

    seafile_encrypt ((char **)&new_random_key_raw, &random_key_len,
                     (char *)secret_key, secret_key_len, crypt);

    /* 48 raw bytes -> 96 hex chars written into caller-provided buffer. */
    rawdata_to_hex (new_random_key_raw, new_random_key, 48);

    g_free (secret_key);
    g_free (new_random_key_raw);
    g_free (crypt);

    return 0;
}

/* Encrypt @in_len bytes of @data_in into a newly g_malloc'ed buffer.
 * Cipher is selected by crypt->version: 1 -> AES-128-CBC,
 * 3 -> AES-128-ECB, anything else -> AES-256-CBC. PKCS padding is always
 * applied, so the output size is the next whole multiple of BLK_SIZE
 * strictly above @in_len.
 * On success returns 0 with *data_out (caller g_free's) and *out_len set;
 * on failure returns -1 with *data_out = NULL and *out_len = -1. */
int
seafile_encrypt (char **data_out,
                 int *out_len,
                 const char *data_in,
                 const int in_len,
                 SeafileCrypt *crypt)
{
    *data_out = NULL;
    *out_len = -1;

    /* check validation */
    if ( data_in == NULL || in_len <= 0 ||
         crypt == NULL) {
        seaf_warning ("Invalid params.\n");
        return -1;
    }

    EVP_CIPHER_CTX *ctx;
    int ret;
    int blks;

    /* Prepare CTX for encryption. */
    ctx = EVP_CIPHER_CTX_new ();

    if (crypt->version == 1)
        ret = EVP_EncryptInit_ex (ctx,
                                  EVP_aes_128_cbc(), /* cipher mode */
                                  NULL, /* engine, NULL for default */
                                  crypt->key,  /* derived key */
                                  crypt->iv);  /* initial vector */
    else if (crypt->version == 3)
        ret = EVP_EncryptInit_ex (ctx,
                                  EVP_aes_128_ecb(), /* cipher mode */
                                  NULL, /* engine, NULL for default */
                                  crypt->key,  /* derived key */
                                  crypt->iv);  /* initial vector */
    else
        ret = EVP_EncryptInit_ex (ctx,
                                  EVP_aes_256_cbc(), /* cipher mode */
                                  NULL, /* engine, NULL for default */
                                  crypt->key,  /* derived key */
                                  crypt->iv);  /* initial vector */

    if (ret == ENC_FAILURE) {
        EVP_CIPHER_CTX_free (ctx);
        return -1;
    }

    /* Allocating output buffer. */
    /* For EVP symmetric encryption, padding is always used __even if__
       data size is a multiple of block size, in which case the padding
       length is the block size. so we have the following: */
    blks = (in_len / BLK_SIZE) + 1;

    *data_out = (char *)g_malloc (blks * BLK_SIZE);
    if (*data_out == NULL) {
        seaf_warning ("failed to allocate the output buffer.\n");
        goto enc_error;
    }

    int update_len, final_len;

    /* Do the encryption. */
    ret = EVP_EncryptUpdate (ctx,
                             (unsigned char*)*data_out,
                             &update_len,
                             (unsigned char*)data_in,
                             in_len);
    if (ret == ENC_FAILURE)
        goto enc_error;

    /* Finish the possible partial block. */
    ret = EVP_EncryptFinal_ex (ctx,
                               (unsigned char*)*data_out + update_len,
                               &final_len);
    *out_len = update_len + final_len;

    /* out_len should be equal to the allocated buffer size. */
    if (ret == ENC_FAILURE || *out_len != (blks * BLK_SIZE))
        goto enc_error;

    EVP_CIPHER_CTX_free (ctx);
    return 0;

enc_error:
    /* Single cleanup path: free the context and any partial output. */
    EVP_CIPHER_CTX_free (ctx);

    *out_len = -1;
    if (*data_out != NULL)
        g_free (*data_out);
    *data_out = NULL;

    return -1;
}

/* Decrypt @in_len bytes of @data_in (must be a whole number of AES
 * blocks) into a newly g_malloc'ed buffer; cipher choice matches
 * seafile_encrypt. Returns 0 with *data_out/*out_len set on success,
 * -1 with *data_out = NULL and *out_len = -1 on failure. */
int
seafile_decrypt (char **data_out,
                 int *out_len,
                 const char *data_in,
                 const int in_len,
                 SeafileCrypt *crypt)
{
    *data_out = NULL;
    *out_len = -1;

    /* Check validation. Because padding is always used, in_len must
     * be a multiple of BLK_SIZE */
    if ( data_in == NULL || in_len <= 0 ||
         in_len % BLK_SIZE != 0 ||
         crypt == NULL) {
        seaf_warning ("Invalid param(s).\n");
        return -1;
    }

    EVP_CIPHER_CTX *ctx;
    int ret;

    /* Prepare CTX for decryption. */
    ctx = EVP_CIPHER_CTX_new ();

    if (crypt->version == 1)
        ret = EVP_DecryptInit_ex (ctx,
                                  EVP_aes_128_cbc(), /* cipher mode */
                                  NULL, /* engine, NULL for default */
                                  crypt->key,  /* derived key */
                                  crypt->iv);  /* initial vector */
    else if (crypt->version == 3)
        ret = EVP_DecryptInit_ex (ctx,
                                  EVP_aes_128_ecb(), /* cipher mode */
                                  NULL, /* engine, NULL for default */
                                  crypt->key,  /* derived key */
                                  crypt->iv);  /* initial vector */
    else
        ret = EVP_DecryptInit_ex (ctx,
                                  EVP_aes_256_cbc(), /* cipher mode */
                                  NULL, /* engine, NULL for default */
                                  crypt->key,  /* derived key */
                                  crypt->iv);  /* initial vector */

    if (ret == DEC_FAILURE) {
        EVP_CIPHER_CTX_free (ctx);
        return -1;
    }

    /* Allocating output buffer. Plaintext is never longer than ciphertext. */
    *data_out = (char *)g_malloc (in_len);
    if (*data_out == NULL) {
        seaf_warning ("failed to allocate the output buffer.\n");
        goto dec_error;
    }

    int update_len, final_len;

    /* Do the decryption.
*/
    ret = EVP_DecryptUpdate (ctx,
                             (unsigned char*)*data_out,
                             &update_len,
                             (unsigned char*)data_in,
                             in_len);
    if (ret == DEC_FAILURE)
        goto dec_error;

    /* Finish the possible partial block. */
    ret = EVP_DecryptFinal_ex (ctx,
                               (unsigned char*)*data_out + update_len,
                               &final_len);
    *out_len = update_len + final_len;

    /* out_len should be smaller than in_len. */
    if (ret == DEC_FAILURE || *out_len > in_len)
        goto dec_error;

    EVP_CIPHER_CTX_free (ctx);
    return 0;

dec_error:
    /* Single cleanup path: free the context and any partial output. */
    EVP_CIPHER_CTX_free (ctx);

    *out_len = -1;
    if (*data_out != NULL)
        g_free (*data_out);
    *data_out = NULL;

    return -1;
}

/* Initialize a decryption context in *ctx for streaming use.
 * Cipher choice by @version matches seafile_encrypt/seafile_decrypt:
 * 1 -> AES-128-CBC, 3 -> AES-128-ECB, otherwise AES-256-CBC.
 * Returns 0 on success, -1 on failure; caller frees *ctx. */
int
seafile_decrypt_init (EVP_CIPHER_CTX **ctx,
                      int version,
                      const unsigned char *key,
                      const unsigned char *iv)
{
    int ret;

    /* Prepare CTX for decryption. */
    *ctx = EVP_CIPHER_CTX_new ();

    if (version == 1)
        ret = EVP_DecryptInit_ex (*ctx,
                                  EVP_aes_128_cbc(), /* cipher mode */
                                  NULL, /* engine, NULL for default */
                                  key,  /* derived key */
                                  iv);  /* initial vector */
    else if (version == 3)
        ret = EVP_DecryptInit_ex (*ctx,
                                  EVP_aes_128_ecb(), /* cipher mode */
                                  NULL, /* engine, NULL for default */
                                  key,  /* derived key */
                                  iv);  /* initial vector */
    else
        ret = EVP_DecryptInit_ex (*ctx,
                                  EVP_aes_256_cbc(), /* cipher mode */
                                  NULL, /* engine, NULL for default */
                                  key,  /* derived key */
                                  iv);  /* initial vector */

    if (ret == DEC_FAILURE)
        return -1;

    return 0;
}

================================================
FILE: common/seafile-crypt.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

/* Description:

   The function pair "seafile_encrypt/seafile_decrypt" are used to
   encrypt/decrypt data in the seafile system, using AES 128 bit ecb
   algorithm provided by openssl.
*/

#ifndef _SEAFILE_CRYPT_H
#define _SEAFILE_CRYPT_H

/* NOTE(review): the two header names below were lost in extraction (angle
 * brackets stripped); presumably <glib.h> and <openssl/evp.h> — confirm
 * against the upstream file. */
#include
#include

/* Block size, in bytes. For AES it can only be 16 bytes. */

#define BLK_SIZE 16
#define ENCRYPT_BLK_SIZE BLK_SIZE

struct SeafileCrypt {
    int version;
    unsigned char key[32];   /* set when enc_version >= 1 */
    unsigned char iv[16];
};

typedef struct SeafileCrypt SeafileCrypt;

SeafileCrypt *
seafile_crypt_new (int version, unsigned char *key, unsigned char *iv);

/* Derive key and iv used by AES encryption from @data_in.
   key and iv is 16 bytes for version 1, and 32 bytes for version 2.

   @data_out: pointer to the output of the encrpyted/decrypted data,
   whose content must be freed by g_free when not used.
   @out_len: pointer to length of output, in bytes
   @data_in: address of input buffer
   @in_len: length of data to be encrpyted/decrypted, in bytes
   @crypt: container of crypto info.

   RETURN VALUES:
   On success, 0 is returned, and the encrpyted/decrypted data is in
   *data_out, with out_len set to its length. On failure, -1 is returned
   and *data_out is set to NULL, with out_len set to -1;
*/
int
seafile_derive_key (const char *data_in, int in_len, int version,
                    const char *repo_salt,
                    unsigned char *key, unsigned char *iv);

/* @salt must be an char array of size 65 bytes. */
int
seafile_generate_repo_salt (char *repo_salt);

/*
 * Generate the real key used to encrypt data.
 * The key 32 bytes long and encrpted with @passwd.
 */
int
seafile_generate_random_key (const char *passwd, int version,
                             const char *repo_salt, char *random_key);

void
seafile_generate_magic (int version, const char *repo_id,
                        const char *passwd,
                        const char *repo_salt,
                        char *magic);

void
seafile_generate_pwd_hash (int version,
                           const char *repo_id,
                           const char *passwd,
                           const char *repo_salt,
                           const char *algo,
                           const char *params_str,
                           char *pwd_hash);

int
seafile_verify_repo_passwd (const char *repo_id,
                            const char *passwd,
                            const char *magic,
                            int version,
                            const char *repo_salt);

int
seafile_pwd_hash_verify_repo_passwd (int version,
                                     const char *repo_id,
                                     const char *passwd,
                                     const char *repo_salt,
                                     const char *pwd_hash,
                                     const char *algo,
                                     const char *params_str);

int
seafile_decrypt_repo_enc_key (int enc_version,
                              const char *passwd, const char *random_key,
                              const char *repo_salt,
                              unsigned char *key_out, unsigned char *iv_out);

int
seafile_update_random_key (const char *old_passwd, const char *old_random_key,
                           const char *new_passwd, char *new_random_key,
                           int enc_version, const char *repo_salt);

int
seafile_encrypt (char **data_out,
                 int *out_len,
                 const char *data_in,
                 const int in_len,
                 SeafileCrypt *crypt);

int
seafile_decrypt (char **data_out,
                 int *out_len,
                 const char *data_in,
                 const int in_len,
                 SeafileCrypt *crypt);

int
seafile_decrypt_init (EVP_CIPHER_CTX **ctx,
                      int version,
                      const unsigned char *key,
                      const unsigned char *iv);

#endif /* _SEAFILE_CRYPT_H */

================================================
FILE: common/sync-repo-common.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

#ifndef SYNC_REPO_COMMON
#define SYNC_REPO_COMMON

/* Sync-protocol status codes (SC_*) and their display strings (SS_*). */
#define SC_COMMIT_ID "300"
#define SS_COMMIT_ID "Commit ID"
#define SC_NO_REPO "301"
#define SS_NO_REPO "No such repo"
#define SC_NO_BRANCH "302"
#define SS_NO_BRANCH "No such branch"
#define SC_NO_DSYNC "303"
#define SS_NO_DSYNC "Not double sync"
#define SC_REPO_CORRUPT "304"
#define SS_REPO_CORRUPT "Repo corrupted"
#define SC_SERVER_ERROR "401"
#define SS_SERVER_ERROR "Internal server error"

#endif

================================================
FILE: common/user-mgr.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

#include "common.h"

/* NOTE(review): several system header names below were lost in extraction
 * (bare "#include" lines kept verbatim) — restore from the upstream file. */
#include
#include

#include "utils.h"

#include "seafile-session.h"
#include "seafile-error.h"
#include "user-mgr.h"
#include "seaf-db.h"
#include "seaf-utils.h"

#include
#include
#include

#define DEBUG_FLAG CCNET_DEBUG_PEER
#include "log.h"

#define DEFAULT_SAVING_INTERVAL_MSEC 30000
#define DEFAULT_MAX_CONNECTIONS 100

G_DEFINE_TYPE (CcnetUserManager, ccnet_user_manager, G_TYPE_OBJECT);

#define GET_PRIV(o) \
    (G_TYPE_INSTANCE_GET_PRIVATE ((o), CCNET_TYPE_USER_MANAGER, CcnetUserManagerPriv))

static int open_db (CcnetUserManager *manager);

struct CcnetUserManagerPriv {
    CcnetDB *db;         /* user database handle */
    int max_users;       /* 0 means unlimited */
};

static void
ccnet_user_manager_class_init (CcnetUserManagerClass *klass)
{
    g_type_class_add_private (klass, sizeof (CcnetUserManagerPriv));
}

static void
ccnet_user_manager_init (CcnetUserManager *manager)
{
    manager->priv = GET_PRIV(manager);
}

/* Allocate a user manager bound to @session; DB is opened later in
 * ccnet_user_manager_prepare(). */
CcnetUserManager*
ccnet_user_manager_new (SeafileSession *session)
{
    CcnetUserManager* manager;

    manager = g_object_new (CCNET_TYPE_USER_MANAGER, NULL);

    manager->session = session;
    manager->user_hash = g_hash_table_new (g_str_hash, g_str_equal);

    return manager;
}

#define DEFAULT_PASSWD_HASH_ITER 10000

// return current active user number
static int
get_current_user_number (CcnetUserManager *manager)
{
    int total = 0, count;
    count = ccnet_user_manager_count_emailusers (manager, "DB");
    if (count < 0) {
        ccnet_warning ("Failed to get user number from DB.\n");
        return -1;
    }
    total += count;

    return total;
}

/* Return TRUE if the active-user count is within the configured limit.
 * @allow_equal: TRUE permits exactly max_users (used when no new user is
 * being added), FALSE requires room for one more. max_users == 0 disables
 * the check. */
static gboolean
check_user_number (CcnetUserManager *manager, gboolean allow_equal)
{
    if (manager->priv->max_users == 0) {
        return TRUE;
    }

    int cur_num = get_current_user_number (manager);
    if (cur_num < 0) {
        return FALSE;
    }

    if ((allow_equal && cur_num > manager->priv->max_users) ||
        (!allow_equal && cur_num >= manager->priv->max_users)) {
        ccnet_warning ("The number of users exceeds limit, max %d, current %d\n",
                       manager->priv->max_users, cur_num);
        return FALSE;
    }

    return TRUE;
}

/* Open the user DB and verify the user-count limit.
 * Returns 0 on success, <0 on failure. */
int
ccnet_user_manager_prepare (CcnetUserManager *manager)
{
    int ret;

    manager->passwd_hash_iter = DEFAULT_PASSWD_HASH_ITER;
    manager->userdb_path = g_build_filename (manager->session->ccnet_dir,
                                             "user-db", NULL);
    ret = open_db(manager);
    if (ret < 0)
        return ret;

    if (!check_user_number (manager, TRUE)) {
        return -1;
    }

    return 0;
}

void
ccnet_user_manager_free (CcnetUserManager *manager)
{
    g_object_unref (manager);
}

/* Lifecycle hooks; currently no-ops. */
void
ccnet_user_manager_start (CcnetUserManager *manager)
{
}

void ccnet_user_manager_on_exit (CcnetUserManager *manager)
{
}

void
ccnet_user_manager_set_max_users (CcnetUserManager *manager, gint64 max_users)
{
    manager->priv->max_users = max_users;
}

/* -------- DB Operations -------- */

/* Create the user-manager tables and indexes for the configured backend
 * (MySQL / SQLite / PostgreSQL). Returns 0 on success, -1 on the first
 * failed statement. */
static int check_db_table (SeafDB *db)
{
    char *sql;

    int db_type = seaf_db_type (db);
    if (db_type == SEAF_DB_TYPE_MYSQL) {
        sql = "CREATE TABLE IF NOT EXISTS EmailUser ("
            "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
            "email VARCHAR(255), passwd VARCHAR(256), "
            "is_staff BOOL NOT NULL, is_active BOOL NOT NULL, "
            "ctime BIGINT, reference_id VARCHAR(255),"
            "UNIQUE INDEX (email), UNIQUE INDEX (reference_id))"
            "ENGINE=INNODB";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE TABLE IF NOT EXISTS Binding (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
            "email VARCHAR(255), peer_id CHAR(41),"
            "UNIQUE INDEX (peer_id), INDEX (email(20)))"
            "ENGINE=INNODB";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE TABLE IF NOT EXISTS UserRole ("
            "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
            "email VARCHAR(255), role VARCHAR(255), UNIQUE INDEX (email)) "
            "ENGINE=INNODB";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE TABLE IF NOT EXISTS LDAPConfig ( "
            "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, cfg_group VARCHAR(255) NOT NULL,"
            "cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER) ENGINE=INNODB";
        if (seaf_db_query (db, sql) < 0)
            return -1;

    } else if (db_type == SEAF_DB_TYPE_SQLITE) {
        sql = "CREATE TABLE IF NOT EXISTS EmailUser ("
            "id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,"
            "email TEXT, passwd TEXT, is_staff bool NOT NULL, "
            "is_active bool NOT NULL, ctime INTEGER, "
            "reference_id TEXT)";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE UNIQUE INDEX IF NOT EXISTS email_index on EmailUser (email)";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE UNIQUE INDEX IF NOT EXISTS reference_id_index on EmailUser (reference_id)";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE TABLE IF NOT EXISTS Binding (email TEXT, peer_id TEXT)";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE INDEX IF NOT EXISTS email_index on Binding (email)";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE UNIQUE INDEX IF NOT EXISTS peer_index on Binding (peer_id)";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE TABLE IF NOT EXISTS UserRole (email TEXT, role TEXT)";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE INDEX IF NOT EXISTS userrole_email_index on UserRole (email)";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE UNIQUE INDEX IF NOT EXISTS userrole_userrole_index on UserRole (email, role)";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE TABLE IF NOT EXISTS LDAPConfig (cfg_group VARCHAR(255) NOT NULL,"
            "cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER)";
        if (seaf_db_query (db, sql) < 0)
            return -1;

    } else if (db_type == SEAF_DB_TYPE_PGSQL) {
        sql = "CREATE TABLE IF NOT EXISTS EmailUser ("
            "id SERIAL PRIMARY KEY, "
            "email VARCHAR(255), passwd VARCHAR(256), "
            "is_staff INTEGER NOT NULL, is_active INTEGER NOT NULL, "
            "ctime BIGINT, reference_id VARCHAR(255), UNIQUE (email))";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        //if (!pgsql_index_exists (db, "emailuser_reference_id_idx")) {
        //    sql = "CREATE UNIQUE INDEX emailuser_reference_id_idx ON EmailUser (reference_id)";
        //    if (seaf_db_query (db, sql) < 0)
        //        return -1;
        //}

        sql = "CREATE TABLE IF NOT EXISTS Binding (email VARCHAR(255), peer_id CHAR(41),"
            "UNIQUE (peer_id))";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE TABLE IF NOT EXISTS UserRole (email VARCHAR(255), "
            " role VARCHAR(255), UNIQUE (email, role))";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        //if (!pgsql_index_exists (db, "userrole_email_idx")) {
        //    sql = "CREATE INDEX userrole_email_idx ON UserRole (email)";
        //    if (seaf_db_query (db, sql) < 0)
        //        return -1;
        //}

        sql = "CREATE TABLE IF NOT EXISTS LDAPConfig (cfg_group VARCHAR(255) NOT NULL,"
            "cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER)";
        if (seaf_db_query (db, sql) < 0)
            return -1;
    }

    return 0;
}

/* Open (creating the directory if needed) the SQLite user database at
 * <ccnet_dir>/PeerMgr/usermgr.db. Returns NULL on failure. */
static CcnetDB *
open_sqlite_db (CcnetUserManager *manager)
{
    CcnetDB *db = NULL;
    char *db_dir;
    char *db_path;

    db_dir = g_build_filename (manager->session->ccnet_dir, "PeerMgr", NULL);
    if (checkdir_with_mkdir(db_dir) < 0) {
        ccnet_error ("Cannot open db dir %s: %s\n", db_dir,
                     strerror(errno));
        return NULL;
    }
    g_free (db_dir);

    db_path = g_build_filename (manager->session->ccnet_dir, "PeerMgr",
                                "usermgr.db", NULL);
    db = seaf_db_new_sqlite (db_path, DEFAULT_MAX_CONNECTIONS);
    g_free (db_path);

    return db;
}

/* Pick the user DB (private SQLite file, or the shared ccnet DB for
 * MySQL/PostgreSQL) and create tables when configured to. */
static int
open_db (CcnetUserManager *manager)
{
    CcnetDB *db = NULL;

    switch (seaf_db_type(manager->session->ccnet_db)) {
    /* To be compatible with the db file layout of 0.9.1 version,
     * we don't use conf-dir/ccnet.db for user and peer info, but
     * user conf-dir/PeerMgr/peermgr.db and conf-dir/PeerMgr/usermgr.db instead.
     */
    case SEAF_DB_TYPE_SQLITE:
        db = open_sqlite_db (manager);
        break;
    case SEAF_DB_TYPE_PGSQL:
    case SEAF_DB_TYPE_MYSQL:
        db = manager->session->ccnet_db;
        break;
    }

    if (!db)
        return -1;
    manager->priv->db = db;
    if ((manager->session->ccnet_create_tables || seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)
        && check_db_table (db) < 0) {
        ccnet_warning ("Failed to create user db tables.\n");
        return -1;
    }
    return 0;
}

/* -------- EmailUser Management -------- */

/* This fixed salt is used in very early versions. It's kept for compatibility.
 * For the current password hashing algorithm, please see hash_password_pbkdf2_sha256()
 */
static unsigned char salt[8] = { 0xdb, 0x91, 0x45, 0xc3, 0x06, 0xc7, 0xcc, 0x26 };

/* Legacy scheme 1: plain SHA-1 of the password, hex-encoded (40 chars). */
static void
hash_password (const char *passwd, char *hashed_passwd)
{
    unsigned char sha1[20];
    SHA_CTX s;

    SHA1_Init (&s);
    SHA1_Update (&s, passwd, strlen(passwd));
    SHA1_Final (sha1, &s);

    rawdata_to_hex (sha1, hashed_passwd, 20);
}

/* Legacy scheme 2: SHA-256 of password + fixed salt, hex-encoded (64 chars). */
static void
hash_password_salted (const char *passwd, char *hashed_passwd)
{
    unsigned char sha[SHA256_DIGEST_LENGTH];
    SHA256_CTX s;

    SHA256_Init (&s);
    SHA256_Update (&s, passwd, strlen(passwd));
    SHA256_Update (&s, salt, sizeof(salt));
    SHA256_Final (sha, &s);

    rawdata_to_hex (sha, hashed_passwd, SHA256_DIGEST_LENGTH);
}

/* Current scheme: PBKDF2-HMAC-SHA256 with a random per-user salt.
 * Produces a "PBKDF2SHA256$iterations$salt$hash" string in *db_passwd
 * (caller g_free's). */
static void
hash_password_pbkdf2_sha256 (const char *passwd, int iterations,
                             char **db_passwd)
{
    guint8 sha[SHA256_DIGEST_LENGTH];
    guint8 salt[SHA256_DIGEST_LENGTH];
    char hashed_passwd[SHA256_DIGEST_LENGTH*2+1];
    char salt_str[SHA256_DIGEST_LENGTH*2+1];

    if (!RAND_bytes (salt, sizeof(salt))) {
        ccnet_warning ("Failed to generate salt "
                       "with RAND_bytes(), use RAND_pseudo_bytes().\n");
        RAND_pseudo_bytes (salt, sizeof(salt));
    }

    PKCS5_PBKDF2_HMAC (passwd, strlen(passwd),
                       salt, sizeof(salt),
                       iterations,
                       EVP_sha256(),
                       sizeof(sha), sha);

    rawdata_to_hex (sha, hashed_passwd, SHA256_DIGEST_LENGTH);

    rawdata_to_hex (salt, salt_str, SHA256_DIGEST_LENGTH);

    /* Encode password hash related information into one string, similar to Django.
*/ GString *buf = g_string_new (NULL); g_string_printf (buf, "PBKDF2SHA256$%d$%s$%s", iterations, salt_str, hashed_passwd); *db_passwd = g_string_free (buf, FALSE); } static gboolean validate_passwd_pbkdf2_sha256 (const char *passwd, const char *db_passwd) { char **tokens; char *salt_str, *hash; int iter; guint8 sha[SHA256_DIGEST_LENGTH]; guint8 salt[SHA256_DIGEST_LENGTH]; char hashed_passwd[SHA256_DIGEST_LENGTH*2+1]; if (g_strcmp0 (db_passwd, "!") == 0) return FALSE; tokens = g_strsplit (db_passwd, "$", -1); if (!tokens || g_strv_length (tokens) != 4) { if (tokens) g_strfreev (tokens); ccnet_warning ("Invalide db passwd format %s.\n", db_passwd); return FALSE; } iter = atoi (tokens[1]); salt_str = tokens[2]; hash = tokens[3]; hex_to_rawdata (salt_str, salt, SHA256_DIGEST_LENGTH); PKCS5_PBKDF2_HMAC (passwd, strlen(passwd), salt, sizeof(salt), iter, EVP_sha256(), sizeof(sha), sha); rawdata_to_hex (sha, hashed_passwd, SHA256_DIGEST_LENGTH); gboolean ret = (strcmp (hash, hashed_passwd) == 0); g_strfreev (tokens); return ret; } static gboolean validate_passwd (const char *passwd, const char *stored_passwd, gboolean *need_upgrade) { char hashed_passwd[SHA256_DIGEST_LENGTH * 2 + 1]; int hash_len = strlen(stored_passwd); *need_upgrade = FALSE; if (hash_len == SHA256_DIGEST_LENGTH * 2) { hash_password_salted (passwd, hashed_passwd); *need_upgrade = TRUE; } else if (hash_len == SHA_DIGEST_LENGTH * 2) { hash_password (passwd, hashed_passwd); *need_upgrade = TRUE; } else { return validate_passwd_pbkdf2_sha256 (passwd, stored_passwd); } if (strcmp (hashed_passwd, stored_passwd) == 0) return TRUE; else return FALSE; } static int update_user_passwd (CcnetUserManager *manager, const char *email, const char *passwd) { CcnetDB *db = manager->priv->db; char *db_passwd = NULL; int ret; hash_password_pbkdf2_sha256 (passwd, manager->passwd_hash_iter, &db_passwd); /* convert email to lower case for case insensitive lookup. 
*/ char *email_down = g_ascii_strdown (email, strlen(email)); ret = seaf_db_statement_query (db, "UPDATE EmailUser SET passwd=? WHERE email=?", 2, "string", db_passwd, "string", email_down); g_free (db_passwd); g_free (email_down); if (ret < 0) return ret; return 0; } int ccnet_user_manager_add_emailuser (CcnetUserManager *manager, const char *email, const char *passwd, int is_staff, int is_active) { CcnetDB *db = manager->priv->db; gint64 now = get_current_time(); char *db_passwd = NULL; int ret; if (!check_user_number (manager, FALSE)) { return -1; } /* A user with unhashed "!" as password cannot be logged in. * Such users are created for book keeping, such as users from * Shibboleth. */ if (g_strcmp0 (passwd, "!") != 0) hash_password_pbkdf2_sha256 (passwd, manager->passwd_hash_iter, &db_passwd); else db_passwd = g_strdup(passwd); /* convert email to lower case for case insensitive lookup. */ char *email_down = g_ascii_strdown (email, strlen(email)); ret = seaf_db_statement_query (db, "INSERT INTO EmailUser(email, passwd, is_staff, " "is_active, ctime) VALUES (?, ?, ?, ?, ?)", 5, "string", email_down, "string", db_passwd, "int", is_staff, "int", is_active, "int64", now); g_free (db_passwd); g_free (email_down); if (ret < 0) return ret; return 0; } int ccnet_user_manager_remove_emailuser (CcnetUserManager *manager, const char *source, const char *email) { CcnetDB *db = manager->priv->db; int ret; seaf_db_statement_query (db, "DELETE FROM UserRole WHERE email=?", 1, "string", email); if (strcmp (source, "DB") == 0) { ret = seaf_db_statement_query (db, "DELETE FROM EmailUser WHERE email=?", 1, "string", email); return ret; } return -1; } static gboolean get_password (CcnetDBRow *row, void *data) { char **p_passwd = data; *p_passwd = g_strdup(seaf_db_row_get_column_text (row, 0)); return FALSE; } int ccnet_user_manager_validate_emailuser (CcnetUserManager *manager, const char *email, const char *passwd) { CcnetDB *db = manager->priv->db; int ret = -1; char *sql; char 
*email_down; char *login_id; char *stored_passwd = NULL; gboolean need_upgrade = FALSE; /* Users with password "!" are for internal book keeping only. */ if (g_strcmp0 (passwd, "!") == 0) return -1; login_id = ccnet_user_manager_get_login_id (manager, email); if (!login_id) { ccnet_warning ("Failed to get login_id for %s\n", email); return -1; } sql = "SELECT passwd FROM EmailUser WHERE email=?"; if (seaf_db_statement_foreach_row (db, sql, get_password, &stored_passwd, 1, "string", login_id) > 0) { if (validate_passwd (passwd, stored_passwd, &need_upgrade)) { if (need_upgrade) update_user_passwd (manager, login_id, passwd); ret = 0; goto out; } else { goto out; } } email_down = g_ascii_strdown (email, strlen(login_id)); if (seaf_db_statement_foreach_row (db, sql, get_password, &stored_passwd, 1, "string", email_down) > 0) { g_free (email_down); if (validate_passwd (passwd, stored_passwd, &need_upgrade)) { if (need_upgrade) update_user_passwd (manager, login_id, passwd); ret = 0; goto out; } else { goto out; } } g_free (email_down); out: g_free (login_id); g_free (stored_passwd); return ret; } static gboolean get_emailuser_cb (CcnetDBRow *row, void *data) { CcnetEmailUser **p_emailuser = data; int id = seaf_db_row_get_column_int (row, 0); const char *email = (const char *)seaf_db_row_get_column_text (row, 1); int is_staff = seaf_db_row_get_column_int (row, 2); int is_active = seaf_db_row_get_column_int (row, 3); gint64 ctime = seaf_db_row_get_column_int64 (row, 4); const char *password = seaf_db_row_get_column_text (row, 5); const char *reference_id = seaf_db_row_get_column_text (row, 6); const char *role = seaf_db_row_get_column_text (row, 7); char *email_l = g_ascii_strdown (email, -1); *p_emailuser = g_object_new (CCNET_TYPE_EMAIL_USER, "id", id, "email", email_l, "is_staff", is_staff, "is_active", is_active, "ctime", ctime, "source", "DB", "password", password, "reference_id", reference_id, "role", role ? 
role : "",
                                 NULL);
    g_free (email_l);
    return FALSE;
}

static char*
ccnet_user_manager_get_role_emailuser (CcnetUserManager *manager,
                                       const char* email);

/* Look up one user by email (exact match first, then lower-cased).
 * @import is currently unused by the visible code. Returns a new
 * CcnetEmailUser, or NULL (with *error set on DB failure). */
static CcnetEmailUser*
get_emailuser (CcnetUserManager *manager,
               const char *email,
               gboolean import,
               GError **error)
{
    CcnetDB *db = manager->priv->db;
    char *sql;
    CcnetEmailUser *emailuser = NULL;
    char *email_down;
    int rc;

    sql = "SELECT e.id, e.email, is_staff, is_active, ctime, passwd, reference_id, role "
        " FROM EmailUser e LEFT JOIN UserRole ON e.email = UserRole.email "
        " WHERE e.email=?";
    rc = seaf_db_statement_foreach_row (db,
                                        sql,
                                        get_emailuser_cb, &emailuser,
                                        1, "string", email);
    if (rc > 0) {
        return emailuser;
    } else if (rc < 0) {
        if (error) {
            g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Database error");
        }
        return NULL;
    }

    /* No exact match: retry with the lower-cased email. */
    email_down = g_ascii_strdown (email, strlen(email));
    rc = seaf_db_statement_foreach_row (db,
                                        sql,
                                        get_emailuser_cb, &emailuser,
                                        1, "string", email_down);
    if (rc > 0) {
        g_free (email_down);
        return emailuser;
    } else if (rc < 0) {
        if (error) {
            g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Database error");
        }
        g_free (email_down);
        return NULL;
    }
    g_free (email_down);

    return NULL;
}

CcnetEmailUser*
ccnet_user_manager_get_emailuser (CcnetUserManager *manager,
                                  const char *email,
                                  GError **error)
{
    return get_emailuser (manager, email, FALSE, error);
}

CcnetEmailUser*
ccnet_user_manager_get_emailuser_with_import (CcnetUserManager *manager,
                                              const char *email,
                                              GError **error)
{
    return get_emailuser (manager, email, TRUE, error);
}

/* Look up one user by numeric row id; NULL on error or no match. */
CcnetEmailUser*
ccnet_user_manager_get_emailuser_by_id (CcnetUserManager *manager, int id)
{
    CcnetDB *db = manager->priv->db;
    char *sql;
    CcnetEmailUser *emailuser = NULL;

    sql = "SELECT e.id, e.email, is_staff, is_active, ctime, passwd, reference_id, role "
        " FROM EmailUser e LEFT JOIN UserRole ON e.email = UserRole.email "
        " WHERE e.id=?";
    if (seaf_db_statement_foreach_row (db, sql, get_emailuser_cb, &emailuser,
                                       1, "int", id) < 0)
        return NULL;

    return emailuser;
}

/* Row callback for list queries: prepend one CcnetEmailUser to *data;
 * returning TRUE keeps iterating over all rows. */
static gboolean
get_emailusers_cb (CcnetDBRow *row, void *data)
{
    GList **plist = data;
    CcnetEmailUser *emailuser;

    int id = seaf_db_row_get_column_int (row, 0);
    const char *email = (const char *)seaf_db_row_get_column_text (row, 1);
    int is_staff = seaf_db_row_get_column_int (row, 2);
    int is_active = seaf_db_row_get_column_int (row, 3);
    gint64 ctime = seaf_db_row_get_column_int64 (row, 4);
    const char *role = (const char *)seaf_db_row_get_column_text (row, 5);
    const char *password = seaf_db_row_get_column_text (row, 6);

    char *email_l = g_ascii_strdown (email, -1);
    emailuser = g_object_new (CCNET_TYPE_EMAIL_USER,
                              "id", id,
                              "email", email_l,
                              "is_staff", is_staff,
                              "is_active", is_active,
                              "ctime", ctime,
                              "role", role ? role : "",
                              "source", "DB",
                              "password", password,
                              NULL);
    g_free (email_l);

    *plist = g_list_prepend (*plist, emailuser);

    return TRUE;
}

/* List users (excluding internal @seafile_group accounts), optionally
 * filtered by @status ("active"/"inactive") and paged by @start/@limit
 * (-1/-1 means all). Returns a list of CcnetEmailUser, NULL on error. */
GList*
ccnet_user_manager_get_emailusers (CcnetUserManager *manager,
                                   const char *source,
                                   int start, int limit,
                                   const char *status)
{
    CcnetDB *db = manager->priv->db;
    const char *status_condition = "";
    char *sql = NULL;
    GList *ret = NULL;
    int rc;

    if (g_strcmp0 (source, "DB") != 0)
        return NULL;

    if (start == -1 && limit == -1) {
        if (g_strcmp0(status, "active") == 0)
            status_condition = "WHERE t1.is_active = 1";
        else if (g_strcmp0(status, "inactive") == 0)
            status_condition = "WHERE t1.is_active = 0";

        /* NOTE(review): with a non-empty status_condition the generated SQL
         * contains two WHERE clauses ("... %s WHERE t1.email NOT LIKE ...")
         * — looks malformed; confirm against the DB layer / upstream. */
        sql = g_strdup_printf ("SELECT t1.id, t1.email, "
                               "t1.is_staff, t1.is_active, t1.ctime, "
                               "t2.role, t1.passwd FROM EmailUser t1 "
                               "LEFT JOIN UserRole t2 "
                               "ON t1.email = t2.email %s "
                               "WHERE t1.email NOT LIKE '%%@seafile_group'",
                               status_condition);

        rc = seaf_db_statement_foreach_row (db,
                                            sql,
                                            get_emailusers_cb, &ret,
                                            0);
        g_free (sql);
    } else {
        if (g_strcmp0(status, "active") == 0)
            status_condition = "WHERE t1.is_active = 1";
        else if (g_strcmp0(status, "inactive") == 0)
            status_condition = "WHERE t1.is_active = 0";

        sql = g_strdup_printf ("SELECT t1.id, t1.email, "
                               "t1.is_staff, t1.is_active, t1.ctime, "
                               "t2.role, t1.passwd FROM EmailUser t1 "
                               "LEFT JOIN UserRole t2 "
                               "ON t1.email = t2.email %s "
                               "WHERE t1.email NOT LIKE '%%@seafile_group' "
                               "ORDER BY t1.id LIMIT ? OFFSET ?",
                               status_condition);

        rc = seaf_db_statement_foreach_row (db,
                                            sql,
                                            get_emailusers_cb, &ret,
                                            2, "int", limit, "int", start);
        g_free (sql);
    }

    if (rc < 0) {
        while (ret != NULL) {
            g_object_unref (ret->data);
            ret = g_list_delete_link (ret, ret);
        }
        return NULL;
    }

    return g_list_reverse (ret);
}

/* Search users whose email contains @keyword (SQL LIKE), with the same
 * paging convention as ccnet_user_manager_get_emailusers. */
GList*
ccnet_user_manager_search_emailusers (CcnetUserManager *manager,
                                      const char *source,
                                      const char *keyword,
                                      int start, int limit)
{
    CcnetDB *db = manager->priv->db;
    GList *ret = NULL;
    int rc;
    char *db_patt = g_strdup_printf ("%%%s%%", keyword);

    if (strcmp (source, "DB") != 0) {
        g_free (db_patt);
        return NULL;
    }

    if (start == -1 && limit == -1)
        rc = seaf_db_statement_foreach_row (db,
                                            "SELECT t1.id, t1.email, "
                                            "t1.is_staff, t1.is_active, t1.ctime, "
                                            "t2.role, t1.passwd FROM EmailUser t1 "
                                            "LEFT JOIN UserRole t2 "
                                            "ON t1.email = t2.email "
                                            "WHERE t1.Email LIKE ? "
                                            "AND t1.email NOT LIKE '%%@seafile_group' "
                                            "ORDER BY t1.id",
                                            get_emailusers_cb, &ret,
                                            1, "string", db_patt);
    else
        rc = seaf_db_statement_foreach_row (db,
                                            "SELECT t1.id, t1.email, "
                                            "t1.is_staff, t1.is_active, t1.ctime, "
                                            "t2.role, t1.passwd FROM EmailUser t1 "
                                            "LEFT JOIN UserRole t2 "
                                            "ON t1.email = t2.email "
                                            "WHERE t1.Email LIKE ? "
                                            "AND t1.email NOT LIKE '%%@seafile_group' "
                                            "ORDER BY t1.id LIMIT ? OFFSET ?",
                                            get_emailusers_cb, &ret,
                                            3, "string", db_patt,
                                            "int", limit, "int", start);

    g_free (db_patt);
    if (rc < 0) {
        while (ret != NULL) {
            g_object_unref (ret->data);
            ret = g_list_delete_link (ret, ret);
        }
        return NULL;
    }

    return g_list_reverse (ret);
}

/* Count active users; only @source "DB" is supported. -1 on error. */
gint64
ccnet_user_manager_count_emailusers (CcnetUserManager *manager, const char *source)
{
    CcnetDB* db = manager->priv->db;
    char sql[512];
    gint64 ret;

    if (g_strcmp0 (source, "DB") != 0)
        return -1;

    snprintf (sql, 512, "SELECT COUNT(id) FROM EmailUser WHERE is_active = 1");

    ret = seaf_db_get_int64 (db, sql);
    if (ret < 0)
        return -1;

    return ret;
}

/* Count inactive users; only @source "DB" is supported. -1 on error. */
gint64
ccnet_user_manager_count_inactive_emailusers (CcnetUserManager *manager, const char *source)
{
    CcnetDB* db = manager->priv->db;
    char sql[512];
    gint64 ret;

    if (g_strcmp0 (source, "DB") != 0)
        return -1;

    snprintf (sql, 512, "SELECT COUNT(id) FROM EmailUser WHERE is_active = 0");

    ret = seaf_db_get_int64 (db, sql);
    if (ret < 0)
        return -1;

    return ret;
}

#if 0
GList*
ccnet_user_manager_filter_emailusers_by_emails(CcnetUserManager *manager,
                                               const char *emails)
{
    CcnetDB *db = manager->priv->db;
    char *copy = g_strdup (emails), *saveptr;
    GList *ret = NULL;

    GString *sql = g_string_new(NULL);
    g_string_append (sql, "SELECT * FROM EmailUser WHERE Email IN (");
    char *name = strtok_r (copy, ", ", &saveptr);
    while (name != NULL) {
        g_string_append_printf (sql, "'%s',", name);
        name = strtok_r (NULL, ", ", &saveptr);
    }
    g_string_erase (sql, sql->len-1, 1); /* remove last "," */
    g_string_append (sql, ")");

    if (seaf_db_foreach_selected_row (db, sql->str,
                                      get_emailusers_cb, &ret) < 0) {
        while (ret != NULL) {
            g_object_unref (ret->data);
            ret = g_list_delete_link (ret, ret);
        }
        return NULL;
    }

    g_free (copy);
    g_string_free (sql, TRUE);

    return g_list_reverse (ret);
}
#endif

/* Update staff/active flags and (unless it is the "!" sentinel) the
 * password of the user with row @id. Only @source "DB" is supported.
 * Returns the query result, or -1. */
int
ccnet_user_manager_update_emailuser (CcnetUserManager *manager,
                                     const char *source,
                                     int id, const char* passwd,
                                     int is_staff, int is_active)
{
    CcnetDB* db = manager->priv->db;
    char *db_passwd = NULL;

    // in case set user user1 to inactive, then add another active user user2,
    // if current user num already the max user num,
    // then reset user1 to active should fail
    if (is_active && !check_user_number (manager, FALSE)) {
        return -1;
    }

    if (strcmp (source, "DB") == 0) {
        if (g_strcmp0 (passwd, "!") == 0) {
            /* Don't update passwd if it starts with '!' */
            return seaf_db_statement_query (db, "UPDATE EmailUser SET is_staff=?, "
                                            "is_active=? WHERE id=?",
                                            3, "int", is_staff, "int", is_active,
                                            "int", id);
        } else {
            hash_password_pbkdf2_sha256 (passwd, manager->passwd_hash_iter,
                                         &db_passwd);
            /* NOTE(review): db_passwd is returned past without being freed
             * here — looks like a leak; confirm and fix upstream. */
            return seaf_db_statement_query (db, "UPDATE EmailUser SET passwd=?, "
                                            "is_staff=?, is_active=? WHERE id=?",
                                            4, "string", db_passwd, "int", is_staff,
                                            "int", is_active, "int", id);
        }
    }

    return -1;
}

/* Row callback: duplicate column 0 (the role) into *data; first row only. */
static gboolean
get_role_emailuser_cb (CcnetDBRow *row, void *data)
{
    *((char **)data) = g_strdup (seaf_db_row_get_column_text (row, 0));
    return FALSE;
}

/* Return the user's role (caller g_free's), or NULL if none recorded. */
static char*
ccnet_user_manager_get_role_emailuser (CcnetUserManager *manager,
                                       const char* email)
{
    CcnetDB *db = manager->priv->db;
    const char *sql;
    char* role;

    sql = "SELECT role FROM UserRole WHERE email=?";
    if (seaf_db_statement_foreach_row (db, sql, get_role_emailuser_cb, &role,
                                       1, "string", email) > 0)
        return role;

    return NULL;
}

/* Set or insert the role for @email (UPDATE if one exists, INSERT otherwise). */
int
ccnet_user_manager_update_role_emailuser (CcnetUserManager *manager,
                                          const char* email, const char* role)
{
    CcnetDB* db = manager->priv->db;
    char *old_role = ccnet_user_manager_get_role_emailuser (manager, email);
    if (old_role) {
        g_free (old_role);
        return seaf_db_statement_query (db, "UPDATE UserRole SET role=? 
" "WHERE email=?", 2, "string", role, "string", email); } else return seaf_db_statement_query (db, "INSERT INTO UserRole(role, email)" " VALUES (?, ?)", 2, "string", role, "string", email); } GList* ccnet_user_manager_get_superusers(CcnetUserManager *manager) { CcnetDB* db = manager->priv->db; GList *ret = NULL; char sql[512]; snprintf (sql, 512, "SELECT t1.id, t1.email, " "t1.is_staff, t1.is_active, t1.ctime, " "t2.role, t1.passwd FROM EmailUser t1 " "LEFT JOIN UserRole t2 " "ON t1.email = t2.email " "WHERE is_staff = 1 AND t1.email NOT LIKE '%%@seafile_group';"); if (seaf_db_foreach_selected_row (db, sql, get_emailusers_cb, &ret) < 0) { while (ret != NULL) { g_object_unref (ret->data); ret = g_list_delete_link (ret, ret); } return NULL; } return g_list_reverse (ret); } char * ccnet_user_manager_get_login_id (CcnetUserManager *manager, const char *primary_id) { return g_strdup (primary_id); } GList * ccnet_user_manager_get_emailusers_in_list (CcnetUserManager *manager, const char *source, const char *user_list, GError **error) { int i; const char *username; json_t *j_array = NULL, *j_obj; json_error_t j_error; GList *ret = NULL; const char *args[20]; j_array = json_loadb (user_list, strlen(user_list), 0, &j_error); if (!j_array) { g_set_error (error, CCNET_DOMAIN, 0, "Bad args."); return NULL; } /* Query 20 users at most. 
/*
 * Rename a user: replace @old_email with @new_email in every table that
 * references the user id.
 *
 * NOTE(review): the updates below are issued one by one with no enclosing
 * transaction, so a failure part-way through leaves the rename half applied.
 *
 * Returns 0 if all updates succeed, -1 on the first failure.
 */
int
ccnet_user_manager_update_emailuser_id (CcnetUserManager *manager,
                                        const char *old_email,
                                        const char *new_email,
                                        GError **error)
{
    int ret = -1;
    int rc;
    GString *sql = g_string_new ("");

    /* 1. update RepoOwner */
    g_string_printf (sql, "UPDATE RepoOwner SET owner_id=? WHERE owner_id=?");
    rc = seaf_db_statement_query (seaf->db, sql->str,
                                  2, "string", new_email, "string", old_email);
    if (rc < 0){
        ccnet_warning ("Failed to update repo owner\n");
        goto out;
    }

    /* 2. update SharedRepo (both the sharer and the receiver columns) */
    g_string_printf (sql, "UPDATE SharedRepo SET from_email=? WHERE from_email=?");
    rc = seaf_db_statement_query (seaf->db, sql->str,
                                  2, "string", new_email, "string", old_email);
    if (rc < 0){
        ccnet_warning ("Failed to update from_email\n");
        goto out;
    }

    g_string_printf (sql, "UPDATE SharedRepo SET to_email=? WHERE to_email=?");
    rc = seaf_db_statement_query (seaf->db, sql->str,
                                  2, "string", new_email, "string", old_email);
    if (rc < 0){
        ccnet_warning ("Failed to update to_email\n");
        goto out;
    }

    /* 3. update GroupUser (delegated to the group manager) */
    rc = ccnet_group_manager_update_group_user (seaf->group_mgr, old_email, new_email);
    if (rc < 0){
        ccnet_warning ("Failed to update group member\n");
        goto out;
    }

    /* 4. update RepoUserToken */
    g_string_printf (sql, "UPDATE RepoUserToken SET email=? WHERE email=?");
    rc = seaf_db_statement_query (seaf->db, sql->str,
                                  2, "string", new_email, "string", old_email);
    if (rc < 0){
        ccnet_warning ("Failed to update repo user token\n");
        goto out;
    }

    /* 5. update FolderUserPerm */
    g_string_printf (sql, "UPDATE FolderUserPerm SET user=? WHERE user=?");
    rc = seaf_db_statement_query (seaf->db, sql->str,
                                  2, "string", new_email, "string", old_email);
    if (rc < 0){
        ccnet_warning ("Failed to update user folder permission\n");
        goto out;
    }

    /* 6. update EmailUser — note this one goes through the user manager's
     * own DB handle (manager->priv->db), not seaf->db like the others. */
    g_string_printf (sql, "UPDATE EmailUser SET email=? WHERE email=?");
    rc = seaf_db_statement_query (manager->priv->db, sql->str,
                                  2, "string", new_email, "string", old_email);
    if (rc < 0){
        ccnet_warning ("Failed to update email user\n");
        goto out;
    }

    /* 7. update UserQuota */
    g_string_printf (sql, "UPDATE UserQuota SET user=? WHERE user=?");
    rc = seaf_db_statement_query (seaf->db, sql->str,
                                  2, "string", new_email, "string", old_email);
    if (rc < 0){
        ccnet_warning ("Failed to update user quota\n");
        goto out;
    }

    ret = 0;

out:
    g_string_free (sql, TRUE);
    return ret;
}
*/ #endif int passwd_hash_iter; CcnetUserManagerPriv *priv; }; struct _CcnetUserManagerClass { GObjectClass parent_class; }; GType ccnet_user_manager_get_type (void); CcnetUserManager* ccnet_user_manager_new (SeafileSession *); int ccnet_user_manager_prepare (CcnetUserManager *manager); void ccnet_user_manager_free (CcnetUserManager *manager); void ccnet_user_manager_start (CcnetUserManager *manager); void ccnet_user_manager_set_max_users (CcnetUserManager *manager, gint64 max_users); int ccnet_user_manager_add_emailuser (CcnetUserManager *manager, const char *email, const char *encry_passwd, int is_staff, int is_active); int ccnet_user_manager_remove_emailuser (CcnetUserManager *manager, const char *source, const char *email); int ccnet_user_manager_validate_emailuser (CcnetUserManager *manager, const char *email, const char *passwd); CcnetEmailUser* ccnet_user_manager_get_emailuser (CcnetUserManager *manager, const char *email, GError **error); CcnetEmailUser* ccnet_user_manager_get_emailuser_with_import (CcnetUserManager *manager, const char *email, GError **error); CcnetEmailUser* ccnet_user_manager_get_emailuser_by_id (CcnetUserManager *manager, int id); /* * @source: "DB" or "LDAP". * @status: "", "active", or "inactive". returns all users when this argument is "". 
*/ GList* ccnet_user_manager_get_emailusers (CcnetUserManager *manager, const char *source, int start, int limit, const char *status); GList* ccnet_user_manager_search_emailusers (CcnetUserManager *manager, const char *source, const char *keyword, int start, int limit); gint64 ccnet_user_manager_count_emailusers (CcnetUserManager *manager, const char *source); gint64 ccnet_user_manager_count_inactive_emailusers (CcnetUserManager *manager, const char *source); GList* ccnet_user_manager_filter_emailusers_by_emails(CcnetUserManager *manager, const char *emails); int ccnet_user_manager_update_emailuser (CcnetUserManager *manager, const char *source, int id, const char* passwd, int is_staff, int is_active); int ccnet_user_manager_update_role_emailuser (CcnetUserManager *manager, const char* email, const char* role); GList* ccnet_user_manager_get_superusers(CcnetUserManager *manager); /* Remove one specific peer-id binding to an email */ char * ccnet_user_manager_get_login_id (CcnetUserManager *manager, const char *primary_id); GList * ccnet_user_manager_get_emailusers_in_list (CcnetUserManager *manager, const char *source, const char *user_list, GError **error); int ccnet_user_manager_update_emailuser_id (CcnetUserManager *manager, const char *old_email, const char *new_email, GError **error); #endif ================================================ FILE: common/vc-common.c ================================================ #include "common.h" #include "seafile-session.h" #include "vc-common.h" #include "log.h" #include "seafile-error.h" static GList * merge_bases_many (SeafCommit *one, int n, SeafCommit **twos); static gint compare_commit_by_time (gconstpointer a, gconstpointer b, gpointer unused) { const SeafCommit *commit_a = a; const SeafCommit *commit_b = b; /* Latest commit comes first in the list. 
*/ return (commit_b->ctime - commit_a->ctime); } static gint compare_commit (gconstpointer a, gconstpointer b) { const SeafCommit *commit_a = a; const SeafCommit *commit_b = b; return strcmp (commit_a->commit_id, commit_b->commit_id); } static gboolean add_to_commit_hash (SeafCommit *commit, void *vhash, gboolean *stop) { GHashTable *hash = vhash; char *key = g_strdup (commit->commit_id); g_hash_table_replace (hash, key, key); return TRUE; } static GHashTable * commit_tree_to_hash (SeafCommit *head) { GHashTable *hash; gboolean res; hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr, head->repo_id, head->version, head->commit_id, add_to_commit_hash, hash, FALSE); if (!res) goto fail; return hash; fail: g_hash_table_destroy (hash); return NULL; } static GList * get_independent_commits (GList *commits) { SeafCommit **rslt; GList *list, *result; int cnt, i, j; SeafCommit *c; g_debug ("Get independent commits.\n"); cnt = g_list_length (commits); rslt = calloc(cnt, sizeof(*rslt)); for (list = commits, i = 0; list; list = list->next) rslt[i++] = list->data; g_list_free (commits); for (i = 0; i < cnt - 1; i++) { for (j = i+1; j < cnt; j++) { if (!rslt[i] || !rslt[j]) continue; result = merge_bases_many(rslt[i], 1, &rslt[j]); for (list = result; list; list = list->next) { c = list->data; /* If two commits have fast-forward relationship, * drop the older one. 
*/ if (strcmp (rslt[i]->commit_id, c->commit_id) == 0) { seaf_commit_unref (rslt[i]); rslt[i] = NULL; } if (strcmp (rslt[j]->commit_id, c->commit_id) == 0) { seaf_commit_unref (rslt[j]); rslt[j] = NULL; } seaf_commit_unref (c); } } } /* Surviving ones in rslt[] are the independent results */ result = NULL; for (i = 0; i < cnt; i++) { if (rslt[i]) result = g_list_insert_sorted_with_data (result, rslt[i], compare_commit_by_time, NULL); } free(rslt); return result; } typedef struct { GList *result; GHashTable *commit_hash; } MergeTraverseData; static gboolean get_merge_bases (SeafCommit *commit, void *vdata, gboolean *stop) { MergeTraverseData *data = vdata; /* Found a common ancestor. * Dont traverse its parenets. */ if (g_hash_table_lookup (data->commit_hash, commit->commit_id)) { if (!g_list_find_custom (data->result, commit, compare_commit)) { data->result = g_list_insert_sorted_with_data (data->result, commit, compare_commit_by_time, NULL); seaf_commit_ref (commit); } *stop = TRUE; } return TRUE; } /* * Merge "one" with commits in "twos". * The ancestors returned may not be ancestors for all the input commits. * They are common ancestors for one and some commits in twos array. */ static GList * merge_bases_many (SeafCommit *one, int n, SeafCommit **twos) { GHashTable *commit_hash; GList *result = NULL; SeafCommit *commit; int i; MergeTraverseData data; gboolean res; for (i = 0; i < n; i++) { if (one == twos[i]) return g_list_append (result, one); } /* First construct a hash table of all commit ids rooted at one. 
*/ commit_hash = commit_tree_to_hash (one); if (!commit_hash) { g_warning ("Failed to load commit hash.\n"); return NULL; } data.commit_hash = commit_hash; data.result = NULL; for (i = 0; i < n; i++) { res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr, twos[i]->repo_id, twos[i]->version, twos[i]->commit_id, get_merge_bases, &data, FALSE); if (!res) goto fail; } g_hash_table_destroy (commit_hash); result = data.result; if (!result || !result->next) return result; /* There are more than one. Try to find out independent ones. */ result = get_independent_commits (result); return result; fail: result = data.result; while (result) { commit = result->data; seaf_commit_unref (commit); result = g_list_delete_link (result, result); } g_hash_table_destroy (commit_hash); return NULL; } /* * Returns common ancesstor for two branches. * Any two commits should have a common ancestor. * So returning NULL indicates an error, for e.g. corupt commit. */ SeafCommit * get_merge_base (SeafCommit *head, SeafCommit *remote) { GList *result, *iter; SeafCommit *one, **twos; int n, i; SeafCommit *ret = NULL; one = head; twos = (SeafCommit **) calloc (1, sizeof(SeafCommit *)); twos[0] = remote; n = 1; result = merge_bases_many (one, n, twos); free (twos); if (!result || !result->next) goto done; /* * More than one common ancestors. * Loop until the oldest common ancestor is found. */ while (1) { n = g_list_length (result) - 1; one = result->data; twos = calloc (n, sizeof(SeafCommit *)); for (iter = result->next, i = 0; i < n; iter = iter->next, i++) { twos[i] = iter->data; } g_list_free (result); result = merge_bases_many (one, n, twos); free (twos); if (!result || !result->next) break; } done: if (result) ret = result->data; g_list_free (result); return ret; } /* * Returns true if src_head is ahead of dst_head. 
*/ gboolean is_fast_forward (const char *repo_id, int version, const char *src_head, const char *dst_head) { VCCompareResult res; res = vc_compare_commits (repo_id, version, src_head, dst_head); return (res == VC_FAST_FORWARD); } VCCompareResult vc_compare_commits (const char *repo_id, int version, const char *c1, const char *c2) { SeafCommit *commit1, *commit2, *ca; VCCompareResult ret; /* Treat the same as up-to-date. */ if (strcmp (c1, c2) == 0) return VC_UP_TO_DATE; commit1 = seaf_commit_manager_get_commit (seaf->commit_mgr, repo_id, version, c1); if (!commit1) return VC_INDEPENDENT; commit2 = seaf_commit_manager_get_commit (seaf->commit_mgr, repo_id, version, c2); if (!commit2) { seaf_commit_unref (commit1); return VC_INDEPENDENT; } ca = get_merge_base (commit1, commit2); if (!ca) ret = VC_INDEPENDENT; else if (strcmp(ca->commit_id, commit1->commit_id) == 0) ret = VC_UP_TO_DATE; else if (strcmp(ca->commit_id, commit2->commit_id) == 0) ret = VC_FAST_FORWARD; else ret = VC_INDEPENDENT; if (ca) seaf_commit_unref (ca); seaf_commit_unref (commit1); seaf_commit_unref (commit2); return ret; } /** * Diff a specific file with parent(s). * If @commit is a merge, both parents will be compared. * @commit must have this file and it's id is given in @file_id. * * Returns 0 if there is no difference; 1 otherwise. * If returns 0, @parent will point to the next commit to traverse. * If I/O error occurs, @error will be set. 
/**
 * Diff a specific file against @commit's parent(s).
 * If @commit is a merge, both parents are compared.
 * @commit must contain this file and its id is given in @file_id.
 *
 * Returns 1 if the file differs from (all) parent(s); 0 otherwise.
 * If 0 is returned, @parent is filled with the id of the next commit to
 * traverse.  On I/O error, @error is set and 0 is returned.
 *
 * NOTE(review): @parent receives a 41-byte copy (40 hex chars + NUL), so
 * the caller must supply a buffer of at least 41 bytes.
 */
static int
diff_parents_with_path (SeafCommit *commit,
                        const char *repo_id,
                        const char *store_id,
                        int version,
                        const char *path,
                        const char *file_id,
                        char *parent,
                        GError **error)
{
    SeafCommit *p1 = NULL, *p2 = NULL;
    char *file_id_p1 = NULL, *file_id_p2 = NULL;
    int ret = 0;

    p1 = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                         commit->repo_id,
                                         commit->version,
                                         commit->parent_id);
    if (!p1) {
        g_warning ("Failed to find commit %s.\n", commit->parent_id);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, " ");
        return 0;
    }

    /* Parent has an empty tree: everything in @commit is new. */
    if (strcmp (p1->root_id, EMPTY_SHA1) == 0) {
        seaf_commit_unref (p1);
        return 1;
    }

    if (commit->second_parent_id) {
        p2 = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                             commit->repo_id,
                                             commit->version,
                                             commit->second_parent_id);
        if (!p2) {
            g_warning ("Failed to find commit %s.\n", commit->second_parent_id);
            seaf_commit_unref (p1);
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, " ");
            return 0;
        }
    }

    if (!p2) {
        /* Single parent: changed iff the file id differs (or is absent). */
        file_id_p1 = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,
                                                     store_id, version,
                                                     p1->root_id, path,
                                                     NULL, error);
        if (*error)
            goto out;
        if (!file_id_p1 || strcmp (file_id, file_id_p1) != 0)
            ret = 1;
        else
            memcpy (parent, p1->commit_id, 41);
    } else {
        /* Merge commit: the file counts as changed only if it differs
         * from BOTH parents; otherwise traversal follows whichever
         * parent still carries the same file id. */
        file_id_p1 = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,
                                                     store_id, version,
                                                     p1->root_id, path,
                                                     NULL, error);
        if (*error)
            goto out;
        file_id_p2 = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,
                                                     store_id, version,
                                                     p2->root_id, path,
                                                     NULL, error);
        if (*error)
            goto out;

        if (file_id_p1 && file_id_p2) {
            if (strcmp(file_id, file_id_p1) != 0 &&
                strcmp(file_id, file_id_p2) != 0)
                ret = 1;
            else if (strcmp(file_id, file_id_p1) == 0)
                memcpy (parent, p1->commit_id, 41);
            else
                memcpy (parent, p2->commit_id, 41);
        } else if (file_id_p1 && !file_id_p2) {
            if (strcmp(file_id, file_id_p1) != 0)
                ret = 1;
            else
                memcpy (parent, p1->commit_id, 41);
        } else if (!file_id_p1 && file_id_p2) {
            if (strcmp(file_id, file_id_p2) != 0)
                ret = 1;
            else
                memcpy (parent, p2->commit_id, 41);
        } else {
            /* File absent from both parents: it was added here. */
            ret = 1;
        }
    }

out:
    g_free (file_id_p1);
    g_free (file_id_p2);
    if (p1)
        seaf_commit_unref (p1);
    if (p2)
        seaf_commit_unref (p2);
    return ret;
}
*/ g_free (file_id); seaf_commit_unref (commit); } } g_free (file_id); if (commit) seaf_commit_unref (commit); return ret; } static int get_file_modifier_mtime_v1 (const char *repo_id, const char *store_id, int version, const char *head, const char *path, char **modifier, gint64 *mtime) { SeafCommit *commit = NULL; SeafDir *dir = NULL; SeafDirent *dent = NULL; int ret = 0; commit = seaf_commit_manager_get_commit (seaf->commit_mgr, repo_id, version, head); if (!commit) { seaf_warning ("Failed to get commit %s.\n", head); return -1; } char *parent = g_path_get_dirname (path); if (strcmp(parent, ".") == 0) { g_free (parent); parent = g_strdup(""); } char *filename = g_path_get_basename (path); dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr, store_id, version, commit->root_id, parent, NULL); if (!dir) { seaf_warning ("dir %s doesn't exist in repo %s.\n", parent, repo_id); ret = -1; goto out; } GList *p; for (p = dir->entries; p; p = p->next) { SeafDirent *d = p->data; if (strcmp (d->name, filename) == 0) { dent = d; break; } } if (!dent) { goto out; } *modifier = g_strdup(dent->modifier); *mtime = dent->mtime; out: g_free (parent); g_free (filename); seaf_commit_unref (commit); seaf_dir_free (dir); return ret; } /** * Get the user who last changed a file and the mtime. * @head: head commit to start the search. * @path: path of the file. 
/*
 * Build a conflict-file name from @origin_path by inserting an
 * "(SFConflict <modifier> <timestamp>)" marker before the extension
 * (the extension is everything after the last '.', if any).
 * Returns a newly allocated string owned by the caller.
 */
char *
gen_conflict_path (const char *origin_path,
                   const char *modifier,
                   gint64 mtime)
{
    char stamp[64];
    time_t seconds = (time_t)mtime;
    char *base = g_strdup (origin_path);
    GString *conflict = g_string_new (NULL);
    char *last_dot, *suffix;

    strftime (stamp, 64, "%Y-%m-%d-%H-%M-%S", localtime(&seconds));

    last_dot = strrchr (base, '.');
    if (last_dot == NULL) {
        /* No extension: append the marker at the end. */
        if (modifier)
            g_string_printf (conflict, "%s (SFConflict %s %s)",
                             base, modifier, stamp);
        else
            g_string_printf (conflict, "%s (SFConflict %s)",
                             base, stamp);
    } else {
        /* Split at the last dot and re-attach the extension after the
         * marker. */
        *last_dot = '\0';
        suffix = last_dot + 1;
        if (modifier)
            g_string_printf (conflict, "%s (SFConflict %s %s).%s",
                             base, modifier, stamp, suffix);
        else
            g_string_printf (conflict, "%s (SFConflict %s).%s",
                             base, stamp, suffix);
    }

    g_free (base);
    return g_string_free (conflict, FALSE);
}
*/ gboolean is_fast_forward (const char *repo_id, int version, const char *src_head, const char *dst_head); typedef enum { VC_UP_TO_DATE, VC_FAST_FORWARD, VC_INDEPENDENT, } VCCompareResult; /* * Compares commits c1 and c2 as if we were going to merge c1 into c2. * * Returns: * VC_UP_TO_DATE: if c2 is ahead of c1, or c1 == c2; * VC_FAST_FORWARD: if c1 is ahead of c2; * VC_INDEPENDENT: if c1 and c2 has no inheritent relationship. * Returns VC_INDEPENDENT if c1 or c2 doesn't exist. */ VCCompareResult vc_compare_commits (const char *repo_id, int version, const char *c1, const char *c2); char * gen_conflict_path (const char *original_path, const char *modifier, gint64 mtime); int get_file_modifier_mtime (const char *repo_id, const char *store_id, int version, const char *head, const char *path, char **modifier, gint64 *mtime); /* Wrapper around the above two functions */ char * gen_conflict_path_wrapper (const char *repo_id, int version, const char *head, const char *in_repo_path, const char *original_path); #endif ================================================ FILE: configure.ac ================================================ dnl Process this file with autoconf to produce a configure script. AC_PREREQ(2.61) AC_INIT([seafile], [6.0.1], [freeplant@gmail.com]) AC_CONFIG_HEADER([config.h]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.9 foreign]) #AC_MINGW32 AC_CANONICAL_BUILD dnl enable the build of share library by default AC_ENABLE_SHARED AC_SUBST(LIBTOOL_DEPS) # Checks for programs. AC_PROG_CC #AM_C_PROTOTYPES AC_C_CONST AC_PROG_MAKE_SET # AC_PROG_RANLIB LT_INIT # Checks for headers. #AC_CHECK_HEADERS([arpa/inet.h fcntl.h inttypes.h libintl.h limits.h locale.h netdb.h netinet/in.h stdint.h stdlib.h string.h strings.h sys/ioctl.h sys/socket.h sys/time.h termios.h unistd.h utime.h utmp.h]) # Checks for typedefs, structures, and compiler characteristics. AC_SYS_LARGEFILE # Checks for library functions. 
#AC_CHECK_FUNCS([alarm dup2 ftruncate getcwd gethostbyname gettimeofday memmove memset mkdir rmdir select setlocale socket strcasecmp strchr strdup strrchr strstr strtol uname utime strtok_r sendfile]) # check platform AC_MSG_CHECKING(for WIN32) if test "$build_os" = "mingw32" -o "$build_os" = "mingw64"; then bwin32=true AC_MSG_RESULT(compile in mingw) else AC_MSG_RESULT(no) fi AC_MSG_CHECKING(for Mac) if test "$(uname)" = "Darwin"; then bmac=true AC_MSG_RESULT(compile in mac) else AC_MSG_RESULT(no) fi AC_MSG_CHECKING(for Linux) if test "$bmac" != "true" -a "$bwin32" != "true"; then blinux=true AC_MSG_RESULT(compile in linux) else AC_MSG_RESULT(no) fi # test which sub-component to compile if test "$bwin32" = true; then compile_tools=no fi if test "$bmac" = true; then compile_tools=no fi if test "$blinux" = true; then compile_tools=yes fi if test "$bwin32" != true; then AC_ARG_ENABLE(fuse, AC_HELP_STRING([--enable-fuse], [enable fuse virtual file system]), [compile_fuse=$enableval],[compile_fuse="yes"]) fi AC_ARG_ENABLE(python, AC_HELP_STRING([--enable-python],[build seafile python binding]), [compile_python=$enableval], [compile_python=yes]) AC_ARG_WITH(mysql, AC_HELP_STRING([--with-mysql],[path to mysql_config]), [MYSQL_CONFIG=$with_mysql], [MYSQL_CONFIG="default_mysql_config"]) AC_ARG_ENABLE(httpserver, AC_HELP_STRING([--enable-httpserver], [enable httpserver]), [compile_httpserver=$enableval],[compile_httpserver="yes"]) AM_CONDITIONAL([COMPILE_TOOLS], [test "${compile_tools}" = "yes"]) AM_CONDITIONAL([COMPILE_PYTHON], [test "${compile_python}" = "yes"]) AM_CONDITIONAL([COMPILE_FUSE], [test "${compile_fuse}" = "yes"]) AM_CONDITIONAL([WIN32], [test "$bwin32" = "true"]) AM_CONDITIONAL([MACOS], [test "$bmac" = "true"]) AM_CONDITIONAL([LINUX], [test "$blinux" = "true"]) # check libraries if test "$bwin32" != true; then if test "$bmac" = true; then AC_CHECK_LIB(c, uuid_generate, [echo "found library uuid"], AC_MSG_ERROR([*** Unable to find uuid_generate in libc]), ) 
else AC_CHECK_LIB(uuid, uuid_generate, [echo "found library uuid"], AC_MSG_ERROR([*** Unable to find uuid library]), ) fi fi AC_CHECK_LIB(pthread, pthread_create, [echo "found library pthread"], AC_MSG_ERROR([*** Unable to find pthread library]), ) AC_CHECK_LIB(sqlite3, sqlite3_open,[echo "found library sqlite3"] , AC_MSG_ERROR([*** Unable to find sqlite3 library]), ) AC_CHECK_LIB(crypto, SHA1_Init, [echo "found library crypto"], AC_MSG_ERROR([*** Unable to find openssl crypto library]), ) dnl Do we need to use AX_LIB_SQLITE3 to check sqlite? dnl AX_LIB_SQLITE3 CONSOLE= if test "$bwin32" = "true"; then AC_ARG_ENABLE(console, AC_HELP_STRING([--enable-console], [enable console]), [console=$enableval],[console="yes"]) if test x${console} != xyes ; then CONSOLE="-Wl,--subsystem,windows -Wl,--entry,_mainCRTStartup" fi fi AC_SUBST(CONSOLE) if test "$bwin32" = true; then LIB_WS32=-lws2_32 LIB_GDI32=-lgdi32 LIB_RT= LIB_INTL=-lintl LIBS= LIB_RESOLV= LIB_UUID=-lRpcrt4 LIB_IPHLPAPI=-liphlpapi LIB_SHELL32=-lshell32 LIB_PSAPI=-lpsapi LIB_MAC= MSVC_CFLAGS="-D__MSVCRT__ -D__MSVCRT_VERSION__=0x0601" LIB_CRYPT32=-lcrypt32 LIB_ICONV=-liconv elif test "$bmac" = true ; then LIB_WS32= LIB_GDI32= LIB_RT= LIB_INTL= LIB_RESOLV=-lresolv LIB_UUID= LIB_IPHLPAPI= LIB_SHELL32= LIB_PSAPI= MSVC_CFLAGS= LIB_MAC="-framework CoreServices" LIB_CRYPT32= LIB_ICONV=-liconv else LIB_WS32= LIB_GDI32= LIB_RT= LIB_INTL= LIB_RESOLV=-lresolv LIB_UUID=-luuid LIB_IPHLPAPI= LIB_SHELL32= LIB_PSAPI= LIB_MAC= MSVC_CFLAGS= LIB_CRYPT32= fi AC_SUBST(LIB_WS32) AC_SUBST(LIB_GDI32) AC_SUBST(LIB_RT) AC_SUBST(LIB_INTL) AC_SUBST(LIB_RESOLV) AC_SUBST(LIB_UUID) AC_SUBST(LIB_IPHLPAPI) AC_SUBST(LIB_SHELL32) AC_SUBST(LIB_PSAPI) AC_SUBST(LIB_MAC) AC_SUBST(MSVC_CFLAGS) AC_SUBST(LIB_CRYPT32) AC_SUBST(LIB_ICONV) LIBEVENT_REQUIRED=2.0 GLIB_REQUIRED=2.16.0 SEARPC_REQUIRED=1.0 JANSSON_REQUIRED=2.2.1 ZDB_REQUIRED=2.10 #LIBNAUTILUS_EXTENSION_REQUIRED=2.30.1 CURL_REQUIRED=7.17 FUSE_REQUIRED=2.7.3 ZLIB_REQUIRED=1.2.0 
LIHIBREDIS_REQUIRED=0.15.0 PKG_CHECK_MODULES(SSL, [openssl]) AC_SUBST(SSL_CFLAGS) AC_SUBST(SSL_LIBS) PKG_CHECK_MODULES(GLIB2, [glib-2.0 >= $GLIB_REQUIRED]) AC_SUBST(GLIB2_CFLAGS) AC_SUBST(GLIB2_LIBS) PKG_CHECK_MODULES(GOBJECT, [gobject-2.0 >= $GLIB_REQUIRED]) AC_SUBST(GOBJECT_CFLAGS) AC_SUBST(GOBJECT_LIBS) PKG_CHECK_MODULES(SEARPC, [libsearpc >= $SEARPC_REQUIRED]) AC_SUBST(SEARPC_CFLAGS) AC_SUBST(SEARPC_LIBS) PKG_CHECK_MODULES(JANSSON, [jansson >= $JANSSON_REQUIRED]) AC_SUBST(JANSSON_CFLAGS) AC_SUBST(JANSSON_LIBS) PKG_CHECK_MODULES(LIBEVENT, [libevent >= $LIBEVENT_REQUIRED]) AC_SUBST(LIBEVENT_CFLAGS) AC_SUBST(LIBEVENT_LIBS) PKG_CHECK_MODULES(ZLIB, [zlib >= $ZLIB_REQUIRED]) AC_SUBST(ZLIB_CFLAGS) AC_SUBST(ZLIB_LIBS) if test "x${MYSQL_CONFIG}" = "xdefault_mysql_config"; then PKG_CHECK_MODULES(MYSQL, [mysqlclient], [have_mysql="yes"], [have_mysql="no"]) if test "x${have_mysql}" = "xyes"; then AC_SUBST(MYSQL_CFLAGS) AC_SUBST(MYSQL_LIBS) AC_DEFINE([HAVE_MYSQL], 1, [Define to 1 if MySQL support is enabled]) fi else AC_MSG_CHECKING([for MySQL]) MYSQL_CFLAGS=`${MYSQL_CONFIG} --include` MYSQL_LIBS=`${MYSQL_CONFIG} --libs` AC_MSG_RESULT([${MYSQL_CFLAGS}]) AC_SUBST(MYSQL_CFLAGS) AC_SUBST(MYSQL_LIBS) AC_DEFINE([HAVE_MYSQL], 1, [Define to 1 if MySQL support is enabled]) fi if test "${compile_httpserver}" = "yes"; then AC_DEFINE([HAVE_EVHTP], [1], [Define to 1 if httpserver is enabled.]) AC_SUBST(EVHTP_LIBS, "-levhtp") fi PKG_CHECK_MODULES(LIBHIREDIS, [hiredis >= $LIHIBREDIS_REQUIRED]) AC_SUBST(LIBHIREDIS_CFLAGS) AC_SUBST(LIBHIREDIS_LIBS) PKG_CHECK_MODULES(CURL, [libcurl >= $CURL_REQUIRED]) AC_SUBST(CURL_CFLAGS) AC_SUBST(CURL_LIBS) PKG_CHECK_MODULES(JWT, [libjwt]) AC_SUBST(JWT_CFLAGS) AC_SUBST(JWT_LIBS) PKG_CHECK_MODULES(ARGON2, [libargon2]) AC_SUBST(ARGON2_CFLAGS) AC_SUBST(ARGON2_LIBS) if test x${compile_python} = xyes; then AM_PATH_PYTHON([2.6]) if test "$bwin32" = true; then if test x$PYTHON_DIR != x; then # set pyexecdir to somewhere like /c/Python26/Lib/site-packages 
pyexecdir=${PYTHON_DIR}/Lib/site-packages pythondir=${pyexecdir} pkgpyexecdir=${pyexecdir}/${PACKAGE} pkgpythondir=${pythondir}/${PACKAGE} fi fi fi if test "${compile_fuse}" = "yes"; then PKG_CHECK_MODULES(FUSE, [fuse >= $FUSE_REQUIRED]) AC_SUBST(FUSE_CFLAGS) AC_SUBST(FUSE_LIBS) fi dnl check libarchive LIBARCHIVE_REQUIRED=2.8.5 PKG_CHECK_MODULES(LIBARCHIVE, [libarchive >= $LIBARCHIVE_REQUIRED]) AC_SUBST(LIBARCHIVE_CFLAGS) AC_SUBST(LIBARCHIVE_LIBS) ac_configure_args="$ac_configure_args -q" AC_CONFIG_FILES( Makefile include/Makefile fuse/Makefile lib/Makefile lib/libseafile.pc common/Makefile common/cdc/Makefile server/Makefile server/gc/Makefile python/Makefile python/seafile/Makefile python/seaserv/Makefile controller/Makefile tools/Makefile doc/Makefile scripts/Makefile ) AC_OUTPUT ================================================ FILE: controller/Makefile.am ================================================ bin_PROGRAMS = seafile-controller AM_CFLAGS = \ -DSEAFILE_SERVER \ -I$(top_srcdir)/include \ -I$(top_srcdir)/lib \ -I$(top_builddir)/lib \ -I$(top_srcdir)/common \ @SEARPC_CFLAGS@ \ @GLIB2_CFLAGS@ \ -Wall noinst_HEADERS = seafile-controller.h ../common/log.h seafile_controller_SOURCES = seafile-controller.c ../common/log.c seafile_controller_LDADD = $(top_builddir)/lib/libseafile_common.la \ @GLIB2_LIBS@ @GOBJECT_LIBS@ @SSL_LIBS@ @LIB_RT@ @LIB_UUID@ @LIBEVENT_LIBS@ \ @SEARPC_LIBS@ @JANSSON_LIBS@ @ZLIB_LIBS@ ================================================ FILE: controller/seafile-controller.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include "common.h" #include #include #include #include #include #include #include #include #include #include #include "utils.h" #include "log.h" #include "seafile-controller.h" #define CHECK_PROCESS_INTERVAL 10 /* every 10 seconds */ #if defined(__sun) #define PROC_SELF_PATH "/proc/self/path/a.out" #else #define PROC_SELF_PATH "/proc/self/exe" 
#endif SeafileController *ctl; static char *controller_pidfile = NULL; char *bin_dir = NULL; char *installpath = NULL; char *topdir = NULL; gboolean enabled_go_fileserver = FALSE; char *seafile_ld_library_path = NULL; static const char *short_opts = "hvftc:d:l:g:G:P:F:"; static const struct option long_opts[] = { { "help", no_argument, NULL, 'h', }, { "version", no_argument, NULL, 'v', }, { "foreground", no_argument, NULL, 'f', }, { "test", no_argument, NULL, 't', }, { "config-dir", required_argument, NULL, 'c', }, { "seafile-dir", required_argument, NULL, 'd', }, { "central-config-dir", required_argument, NULL, 'F' }, { "logdir", required_argument, NULL, 'l', }, { "ccnet-debug-level", required_argument, NULL, 'g' }, { "seafile-debug-level", required_argument, NULL, 'G' }, { "pidfile", required_argument, NULL, 'P' }, { NULL, 0, NULL, 0, }, }; static void controller_exit (int code) __attribute__((noreturn)); static int read_seafdav_config(); static void controller_exit (int code) { if (code != 0) { seaf_warning ("seaf-controller exited with code %d\n", code); } exit(code); } // // Utility functions Start // /* returns the pid of the newly created process */ static int spawn_process (char *argv[], bool is_python_process) { char **ptr = argv; GString *buf = g_string_new(argv[0]); while (*(++ptr)) { g_string_append_printf (buf, " %s", *ptr); } seaf_message ("spawn_process: %s\n", buf->str); g_string_free (buf, TRUE); int pipefd[2] = {0, 0}; if (is_python_process) { if (pipe(pipefd) < 0) { seaf_warning("Failed to create pipe.\n"); } fcntl(pipefd[0], F_SETFL, O_NONBLOCK); } pid_t pid = fork(); if (pid == 0) { if (is_python_process) { if (pipefd[0] > 0 && pipefd[1] > 0) { close(pipefd[0]); dup2(pipefd[1], 2); } } /* child process */ execvp (argv[0], argv); seaf_warning ("failed to execvp %s\n", argv[0]); if (pipefd[1] > 0) { close(pipefd[1]); } exit(-1); } else { /* controller */ if (pid == -1) seaf_warning ("error when fork %s: %s\n", argv[0], strerror(errno)); else 
seaf_message ("spawned %s, pid %d\n", argv[0], pid);

        if (is_python_process) {
            char child_stderr[1024] = {0};
            /* Parent side: drain whatever the python child wrote to stderr
             * through the pipe so early startup failures land in the
             * controller log.  sleep(1) gives the child a moment to fail;
             * the read end was set O_NONBLOCK, so the loop ends once the
             * pipe is drained. */
            if (pipefd[0] > 0 && pipefd[1] > 0){
                close(pipefd[1]);
                sleep(1);
                while (read(pipefd[0], child_stderr, sizeof(child_stderr)) > 0)
                    seaf_warning("%s", child_stderr);
                close(pipefd[0]);
            }
        }
        return (int)pid;
    }
}

#define PID_ERROR_ENOENT 0
#define PID_ERROR_OTHER -1

/**
 * Parse the pid recorded in a pidfile.
 *
 * @return
 * - pid if successfully opened and read the file
 * - PID_ERROR_ENOENT if file not exists,
 * - PID_ERROR_OTHER if other errors
 */
static int
read_pid_from_pidfile (const char *pidfile)
{
    FILE *pf = g_fopen (pidfile, "r");
    if (!pf) {
        if (errno == ENOENT) {
            return PID_ERROR_ENOENT;
        } else {
            return PID_ERROR_OTHER;
        }
    }

    int pid = PID_ERROR_OTHER;
    if (fscanf (pf, "%d", &pid) < 0) {
        seaf_warning ("bad pidfile format: %s\n", pidfile);
        fclose(pf);
        return PID_ERROR_OTHER;
    }

    fclose(pf);
    return pid;
}

/* Send SIGKILL to the process recorded in the pidfile of component
 * `which` (an index into ctl->pidfile).  The pidfile is removed only
 * when the kill succeeded, so a stale file is never left behind. */
static void
kill_by_force (int which)
{
    if (which < 0 || which >= N_PID)
        return;

    char *pidfile = ctl->pidfile[which];
    int pid = read_pid_from_pidfile(pidfile);
    if (pid > 0) {
        // if SIGKILL send success, then remove related pid file
        if (kill ((pid_t)pid, SIGKILL) == 0) {
            g_unlink (pidfile);
        }
    }
}

//
// Utility functions End
//

/* Spawn the C seaf-server daemon with the configured config/data dirs,
 * log file, pidfile and RPC pipe path.  Returns 0 on success, -1 on
 * missing configuration or spawn failure. */
static int
start_seaf_server ()
{
    if (!ctl->config_dir || !ctl->seafile_dir)
        return -1;

    seaf_message ("starting seaf-server ...\n");

    /* Built once and cached in a function-static: the monitor loop may
     * restart the server many times over the controller's lifetime. */
    static char *logfile = NULL;
    if (logfile == NULL) {
        logfile = g_build_filename (ctl->logdir, "seafile.log", NULL);
    }

    char *argv[] = {
        "seaf-server",
        "-F", ctl->central_config_dir,
        "-c", ctl->config_dir,
        "-d", ctl->seafile_dir,
        "-l", logfile,
        "-P", ctl->pidfile[PID_SERVER],
        "-p", ctl->rpc_pipe_path,
        NULL};

    int pid = spawn_process (argv, false);
    if (pid <= 0) {
        seaf_warning ("Failed to spawn seaf-server\n");
        return -1;
    }

    return 0;
}

/* Spawn the golang fileserver with the same central config, data dir and
 * RPC pipe as seaf-server.  Returns 0 on success, -1 on failure. */
static int
start_go_fileserver()
{
    if (!ctl->central_config_dir || !ctl->seafile_dir)
        return -1;

    static char *logfile = NULL;
    if (logfile == NULL) {
        logfile = g_build_filename (ctl->logdir, "fileserver.log", NULL);
    }

    char
*argv[] = { "fileserver", "-F", ctl->central_config_dir, "-d", ctl->seafile_dir, "-l", logfile, "-p", ctl->rpc_pipe_path, "-P", ctl->pidfile[PID_FILESERVER], NULL}; seaf_message ("starting go-fileserver ..."); int pid = spawn_process(argv, false); if (pid <= 0) { seaf_warning("Failed to spawn fileserver\n"); return -1; } return 0; } static const char * get_python_executable() { static const char *python = NULL; if (python != NULL) { return python; } static const char *try_list[] = { "python3" }; int i; for (i = 0; i < G_N_ELEMENTS(try_list); i++) { char *binary = g_find_program_in_path (try_list[i]); if (binary != NULL) { python = binary; break; } } if (python == NULL) { python = g_getenv ("PYTHON"); if (python == NULL) { python = "python"; } } return python; } static void init_seafile_path () { GError *error = NULL; char *binary = g_file_read_link (PROC_SELF_PATH, &error); char *tmp = NULL; if (error != NULL) { seaf_warning ("failed to readlink: %s\n", error->message); return; } bin_dir = g_path_get_dirname (binary); tmp = g_path_get_dirname (bin_dir); installpath = g_path_get_dirname (tmp); topdir = g_path_get_dirname (installpath); g_free (binary); g_free (tmp); } static void setup_python_path() { static GList *path_list = NULL; if (path_list != NULL) { /* Only setup once */ return; } /* Allow seafdav to access seahub_settings.py */ path_list = g_list_prepend (path_list, g_build_filename (topdir, "conf", NULL)); path_list = g_list_prepend (path_list, g_build_filename (installpath, "seahub", NULL)); path_list = g_list_prepend (path_list, g_build_filename (installpath, "seahub/thirdpart", NULL)); path_list = g_list_prepend (path_list, g_build_filename (installpath, "seahub/seahub-extra", NULL)); path_list = g_list_prepend (path_list, g_build_filename (installpath, "seahub/seahub-extra/thirdparts", NULL)); path_list = g_list_prepend (path_list, g_build_filename (installpath, "seafile/lib/python3/site-packages", NULL)); path_list = g_list_prepend (path_list, 
g_build_filename (installpath, "seafile/lib64/python3/site-packages", NULL)); path_list = g_list_reverse (path_list); GList *ptr; GString *new_pypath = g_string_new (g_getenv("PYTHONPATH")); for (ptr = path_list; ptr != NULL; ptr = ptr->next) { const char *path = (char *)ptr->data; g_string_append_c (new_pypath, ':'); g_string_append (new_pypath, path); } g_setenv ("PYTHONPATH", g_string_free (new_pypath, FALSE), TRUE); /* seaf_message ("PYTHONPATH is:\n\n%s\n", g_getenv ("PYTHONPATH")); */ } static void setup_env () { g_setenv ("CCNET_CONF_DIR", ctl->config_dir, TRUE); g_setenv ("SEAFILE_CONF_DIR", ctl->seafile_dir, TRUE); g_setenv ("SEAFILE_CENTRAL_CONF_DIR", ctl->central_config_dir, TRUE); g_setenv ("SEAFILE_RPC_PIPE_PATH", ctl->rpc_pipe_path, TRUE); char *seahub_dir = g_build_filename (installpath, "seahub", NULL); char *seafdav_conf = g_build_filename (ctl->central_config_dir, "seafdav.conf", NULL); g_setenv ("SEAHUB_DIR", seahub_dir, TRUE); g_setenv ("SEAFDAV_CONF", seafdav_conf, TRUE); setup_python_path(); } static int start_seafdav() { static char *seafdav_log_file = NULL; if (seafdav_log_file == NULL) seafdav_log_file = g_build_filename (ctl->logdir, "seafdav.log", NULL); SeafDavConfig conf = ctl->seafdav_config; char port[16]; snprintf (port, sizeof(port), "%d", conf.port); int pid; if (conf.debug_mode) { char *argv[] = { (char *)get_python_executable(), "-m", "wsgidav.server.server_cli", "--server", "gunicorn", "--root", "/", "--log-file", seafdav_log_file, "--pid", ctl->pidfile[PID_SEAFDAV], "--port", port, "--host", conf.host, "-v", NULL }; pid = spawn_process (argv, true); } else { char *argv[] = { (char *)get_python_executable(), "-m", "wsgidav.server.server_cli", "--server", "gunicorn", "--root", "/", "--log-file", seafdav_log_file, "--pid", ctl->pidfile[PID_SEAFDAV], "--port", port, "--host", conf.host, NULL }; pid = spawn_process (argv, true); } if (pid <= 0) { seaf_warning ("Failed to spawn seafdav\n"); return -1; } return 0; } static void 
run_controller_loop () { GMainLoop *mainloop = g_main_loop_new (NULL, FALSE); g_main_loop_run (mainloop); } static gboolean need_restart (int which) { if (which < 0 || which >= N_PID) return FALSE; int pid = read_pid_from_pidfile (ctl->pidfile[which]); if (pid == PID_ERROR_ENOENT) { seaf_warning ("pid file %s does not exist\n", ctl->pidfile[which]); return TRUE; } else if (pid == PID_ERROR_OTHER) { seaf_warning ("failed to read pidfile %s: %s\n", ctl->pidfile[which], strerror(errno)); return FALSE; } else { char buf[256]; snprintf (buf, sizeof(buf), "/proc/%d", pid); if (g_file_test (buf, G_FILE_TEST_IS_DIR)) { return FALSE; } else { seaf_warning ("path /proc/%d doesn't exist, restart progress [%d]\n", pid, which); return TRUE; } } } static gboolean should_start_go_fileserver() { char *seafile_conf = g_build_filename (ctl->central_config_dir, "seafile.conf", NULL); GKeyFile *key_file = g_key_file_new (); gboolean ret = 0; if (!g_key_file_load_from_file (key_file, seafile_conf, G_KEY_FILE_KEEP_COMMENTS, NULL)) { seaf_warning("Failed to load seafile.conf.\n"); ret = FALSE; goto out; } GError *err = NULL; gboolean enabled; enabled = g_key_file_get_boolean(key_file, "fileserver", "use_go_fileserver", &err); if (err) { seaf_warning("Config [fileserver, use_go_fileserver] not set, default is FALSE.\n"); ret = FALSE; g_clear_error(&err); } else { if (enabled) { ret = TRUE; } else { ret = FALSE; } } if (ret) { char *type = NULL; type = g_key_file_get_string (key_file, "database", "type", NULL); if (!type || g_strcmp0 (type, "mysql") != 0) { seaf_message ("Use C fileserver because go fileserver does not support sqlite."); ret = FALSE; } g_free (type); } out: g_key_file_free (key_file); g_free (seafile_conf); return ret; } static gboolean check_process (void *data) { if (need_restart(PID_SERVER)) { seaf_message ("seaf-server need restart...\n"); start_seaf_server(); } if (enabled_go_fileserver) { if (need_restart(PID_FILESERVER)) { seaf_message("fileserver need 
restart...\n"); start_go_fileserver(); } } if (ctl->seafdav_config.enabled) { if (need_restart(PID_SEAFDAV)) { seaf_message ("seafdav need restart...\n"); start_seafdav (); } } return TRUE; } static void start_process_monitor () { ctl->check_process_timer = g_timeout_add ( CHECK_PROCESS_INTERVAL * 1000, check_process, NULL); } static int seaf_controller_start (); /* This would also stop seaf-server & other components */ static void stop_services () { seaf_message ("shutting down all services ...\n"); kill_by_force(PID_SERVER); kill_by_force(PID_FILESERVER); kill_by_force(PID_SEAFDAV); } static void init_pidfile_path (SeafileController *ctl) { char *pid_dir = g_build_filename (topdir, "pids", NULL); if (!g_file_test(pid_dir, G_FILE_TEST_EXISTS)) { if (g_mkdir(pid_dir, 0777) < 0) { seaf_warning("failed to create pid dir %s: %s", pid_dir, strerror(errno)); controller_exit(1); } } ctl->pidfile[PID_SERVER] = g_build_filename (pid_dir, "seaf-server.pid", NULL); ctl->pidfile[PID_SEAFDAV] = g_build_filename (pid_dir, "seafdav.pid", NULL); ctl->pidfile[PID_FILESERVER] = g_build_filename (pid_dir, "fileserver.pid", NULL); } static int seaf_controller_init (SeafileController *ctl, char *central_config_dir, char *config_dir, char *seafile_dir, char *logdir) { init_seafile_path (); if (!g_file_test (config_dir, G_FILE_TEST_IS_DIR)) { seaf_warning ("invalid config_dir: %s\n", config_dir); return -1; } if (!g_file_test (seafile_dir, G_FILE_TEST_IS_DIR)) { seaf_warning ("invalid seafile_dir: %s\n", seafile_dir); return -1; } if (logdir == NULL) { char *topdir = g_path_get_dirname(config_dir); logdir = g_build_filename (topdir, "logs", NULL); if (checkdir_with_mkdir(logdir) < 0) { seaf_error ("failed to create log folder \"%s\": %s\n", logdir, strerror(errno)); return -1; } g_free (topdir); } ctl->central_config_dir = central_config_dir; ctl->config_dir = config_dir; ctl->seafile_dir = seafile_dir; ctl->rpc_pipe_path = g_build_filename (installpath, "runtime", NULL); ctl->logdir = 
logdir; if (read_seafdav_config() < 0) { return -1; } init_pidfile_path (ctl); setup_env (); return 0; } static int seaf_controller_start () { if (start_seaf_server() < 0) { seaf_warning ("Failed to start seaf server\n"); return -1; } if (enabled_go_fileserver) { if (start_go_fileserver() < 0) { seaf_warning ("Failed to start fileserver\n"); return -1; } } start_process_monitor (); return 0; } static int write_controller_pidfile () { if (!controller_pidfile) return -1; pid_t pid = getpid(); FILE *pidfile = g_fopen(controller_pidfile, "w"); if (!pidfile) { seaf_warning ("Failed to fopen() pidfile %s: %s\n", controller_pidfile, strerror(errno)); return -1; } char buf[32]; snprintf (buf, sizeof(buf), "%d\n", pid); if (fputs(buf, pidfile) < 0) { seaf_warning ("Failed to write pidfile %s: %s\n", controller_pidfile, strerror(errno)); fclose (pidfile); return -1; } fflush (pidfile); fclose (pidfile); return 0; } static void remove_controller_pidfile () { if (controller_pidfile) { g_unlink (controller_pidfile); } } static void sigint_handler (int signo) { stop_services (); remove_controller_pidfile(); signal (signo, SIG_DFL); raise (signo); } static void sigchld_handler (int signo) { waitpid (-1, NULL, WNOHANG); } static void sigusr1_handler (int signo) { seafile_log_reopen(); } static void set_signal_handlers () { signal (SIGINT, sigint_handler); signal (SIGTERM, sigint_handler); signal (SIGCHLD, sigchld_handler); signal (SIGUSR1, sigusr1_handler); signal (SIGPIPE, SIG_IGN); } static void usage () { fprintf (stderr, "Usage: seafile-controller OPTIONS\n" "OPTIONS:\n" " -b, --bin-dir insert a directory in front of the PATH env\n" " -c, --config-dir ccnet config dir\n" " -d, --seafile-dir seafile dir\n" ); } /* seafile-controller -t is used to test whether config file is valid */ static void test_config (const char *central_config_dir, const char *ccnet_dir, const char *seafile_dir) { char buf[1024]; GError *error = NULL; int retcode = 0; char *child_stdout = NULL; char 
*child_stderr = NULL; snprintf (buf, sizeof(buf), "seaf-server -F \"%s\" -c \"%s\" -d \"%s\" -t -f", central_config_dir, ccnet_dir, seafile_dir); g_spawn_command_line_sync (buf, &child_stdout, &child_stderr, &retcode, &error); if (error != NULL) { seaf_error ("failed to run \"seaf-server -t\": %s\n", error->message); exit (1); } if (child_stdout) { fputs (child_stdout, stdout); } if (child_stderr) { fputs (child_stderr, stdout); } if (retcode != 0) { seaf_error ("failed to run \"seaf-server -t\" [%d]\n", retcode); exit (1); } exit(0); } static int read_seafdav_config() { int ret = 0; char *seafdav_conf = NULL; GKeyFile *key_file = NULL; GError *error = NULL; seafdav_conf = g_build_filename(ctl->central_config_dir, "seafdav.conf", NULL); if (!g_file_test(seafdav_conf, G_FILE_TEST_EXISTS)) { goto out; } key_file = g_key_file_new (); if (!g_key_file_load_from_file (key_file, seafdav_conf, G_KEY_FILE_KEEP_COMMENTS, NULL)) { seaf_warning("Failed to load seafdav.conf\n"); ret = -1; goto out; } /* enabled */ ctl->seafdav_config.enabled = g_key_file_get_boolean(key_file, "WEBDAV", "enabled", &error); if (error != NULL) { if (error->code != G_KEY_FILE_ERROR_KEY_NOT_FOUND) { seaf_message ("Error when reading WEBDAV.enabled, use default value 'false'\n"); } ctl->seafdav_config.enabled = FALSE; g_clear_error (&error); goto out; } if (!ctl->seafdav_config.enabled) { goto out; } /* host */ char *host = seaf_key_file_get_string (key_file, "WEBDAV", "host", &error); if (error != NULL) { g_clear_error(&error); ctl->seafdav_config.host = g_strdup("0.0.0.0"); } else { ctl->seafdav_config.host = host; } /* port */ ctl->seafdav_config.port = g_key_file_get_integer(key_file, "WEBDAV", "port", &error); if (error != NULL) { if (error->code != G_KEY_FILE_ERROR_KEY_NOT_FOUND) { seaf_message ("Error when reading WEBDAV.port, use deafult value 8080\n"); } ctl->seafdav_config.port = 8080; g_clear_error (&error); } ctl->seafdav_config.debug_mode = g_key_file_get_boolean (key_file, "WEBDAV", 
"debug", &error); if (error != NULL) { if (error->code != G_KEY_FILE_ERROR_KEY_NOT_FOUND) { seaf_message ("Error when reading WEBDAV.debug, use deafult value FALSE\n"); } ctl->seafdav_config.debug_mode = FALSE; g_clear_error (&error); } if (ctl->seafdav_config.port <= 0 || ctl->seafdav_config.port > 65535) { seaf_warning("Failed to load seafdav config: invalid port %d\n", ctl->seafdav_config.port); ret = -1; goto out; } out: if (key_file) { g_key_file_free (key_file); } g_free (seafdav_conf); return ret; } static int init_syslog_config () { char *seafile_conf = g_build_filename (ctl->central_config_dir, "seafile.conf", NULL); GKeyFile *key_file = g_key_file_new (); int ret = 0; if (!g_key_file_load_from_file (key_file, seafile_conf, G_KEY_FILE_KEEP_COMMENTS, NULL)) { seaf_warning("Failed to load seafile.conf.\n"); ret = -1; goto out; } set_syslog_config (key_file); out: g_key_file_free (key_file); g_free (seafile_conf); return ret; } int main (int argc, char **argv) { if (argc <= 1) { usage (); exit (1); } char *config_dir = DEFAULT_CONFIG_DIR; char *central_config_dir = NULL; char *seafile_dir = NULL; char *logdir = NULL; char *ccnet_debug_level_str = "info"; char *seafile_debug_level_str = "debug"; int daemon_mode = 1; gboolean test_conf = FALSE; int c; while ((c = getopt_long (argc, argv, short_opts, long_opts, NULL)) != EOF) { switch (c) { case 'h': usage (); exit(1); break; case 'v': fprintf (stderr, "seafile-controller version 1.0\n"); exit(1); break; case 't': test_conf = TRUE; break; case 'c': config_dir = optarg; break; case 'F': central_config_dir = g_strdup(optarg); break; case 'd': seafile_dir = g_strdup(optarg); break; case 'f': daemon_mode = 0; break; case 'L': logdir = g_strdup(optarg); break; case 'g': ccnet_debug_level_str = optarg; break; case 'G': seafile_debug_level_str = optarg; break; case 'P': controller_pidfile = optarg; break; default: usage (); exit (1); } } #if !GLIB_CHECK_VERSION(2, 35, 0) g_type_init(); #endif #if 
!GLIB_CHECK_VERSION(2,32,0) g_thread_init (NULL); #endif if (!seafile_dir) { fprintf (stderr, " must be specified with --seafile-dir\n"); exit(1); } if (!central_config_dir) { fprintf (stderr, " must be specified with --central-config-dir\n"); exit(1); } central_config_dir = ccnet_expand_path (central_config_dir); config_dir = ccnet_expand_path (config_dir); seafile_dir = ccnet_expand_path (seafile_dir); if (test_conf) { test_config (central_config_dir, config_dir, seafile_dir); } ctl = g_new0 (SeafileController, 1); if (seaf_controller_init (ctl, central_config_dir, config_dir, seafile_dir, logdir) < 0) { controller_exit(1); } char *logfile = g_build_filename (ctl->logdir, "controller.log", NULL); if (seafile_log_init (logfile, ccnet_debug_level_str, seafile_debug_level_str, "seafile-controller") < 0) { fprintf (stderr, "Failed to init log.\n"); controller_exit (1); } if (init_syslog_config () < 0) { controller_exit (1); } set_signal_handlers (); enabled_go_fileserver = should_start_go_fileserver(); if (seaf_controller_start () < 0) controller_exit (1); const char *log_to_stdout_env = g_getenv("SEAFILE_LOG_TO_STDOUT"); if (g_strcmp0(log_to_stdout_env, "true") == 0) { daemon_mode = 0; } #ifndef WIN32 if (daemon_mode) { #ifndef __APPLE__ daemon (1, 0); #else /* __APPLE */ /* daemon is deprecated under APPLE * use fork() instead * */ switch (fork ()) { case -1: seaf_warning ("Failed to daemonize"); exit (-1); break; case 0: /* all good*/ break; default: /* kill origin process */ exit (0); } #endif /* __APPLE */ } #endif /* !WIN32 */ if (controller_pidfile == NULL) { controller_pidfile = g_strdup(g_getenv ("SEAFILE_PIDFILE")); } if (controller_pidfile != NULL) { if (write_controller_pidfile () < 0) { seaf_warning ("Failed to write pidfile %s\n", controller_pidfile); return -1; } } run_controller_loop (); return 0; } ================================================ FILE: controller/seafile-controller.h ================================================ /* -*- Mode: C; 
tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ /* * Seafile-controller is responsible for: * * 1. Start: start server processes: * * - ccnet-server * - seaf-server * - seaf-mon * * 2. Repair: * * - ensure ccnet process availability by watching client->connfd * - ensure server processes availablity by checking process is running periodically * If some process has stopped working, try to restart it. * */ #ifndef SEAFILE_CONTROLLER_H #define SEAFILE_CONTROLLER_H typedef struct _SeafileController SeafileController; enum { PID_CCNET = 0, PID_SERVER, PID_FILESERVER, PID_SEAFDAV, PID_SEAFEVENTS, N_PID }; typedef struct SeafDavConfig { gboolean enabled; int port; char *host; gboolean debug_mode; } SeafDavConfig; struct _SeafileController { char *central_config_dir; char *config_dir; char *seafile_dir; char *rpc_pipe_path; char *logdir; guint check_process_timer; guint client_io_id; /* Decide whether to start seaf-server in cloud mode */ gboolean cloud_mode; int pid[N_PID]; char *pidfile[N_PID]; SeafDavConfig seafdav_config; gboolean has_seafevents; }; #endif ================================================ FILE: doc/Makefile.am ================================================ EXTRA_DIST = seafile-tutorial.doc ================================================ FILE: fileserver/.golangci.yml ================================================ run: timeout: 2m linters: enable: - govet - gosimple - ineffassign - staticcheck - unused - gofmt disable: - errcheck ================================================ FILE: fileserver/blockmgr/blockmgr.go ================================================ // Package blockmgr provides operations on blocks package blockmgr import ( "github.com/haiwen/seafile-server/fileserver/objstore" "io" ) var store *objstore.ObjectStore // Init initializes block manager and creates underlying object store. 
func Init(seafileConfPath string, seafileDataDir string) {
	store = objstore.New(seafileConfPath, seafileDataDir, "blocks")
}

// Read reads block from storage backend.
func Read(repoID string, blockID string, w io.Writer) error {
	err := store.Read(repoID, blockID, w)
	if err != nil {
		return err
	}
	return nil
}

// Write writes block to storage backend.
// NOTE(review): the trailing `false` flag is passed straight through to
// objstore.Write; confirm its semantics there before changing it.
func Write(repoID string, blockID string, r io.Reader) error {
	err := store.Write(repoID, blockID, r, false)
	if err != nil {
		return err
	}
	return nil
}

// Exists checks block if exists.
// Lookup errors are deliberately ignored and reported as "does not exist".
func Exists(repoID string, blockID string) bool {
	ret, _ := store.Exists(repoID, blockID)
	return ret
}

// Stat calculates block size.
func Stat(repoID string, blockID string) (int64, error) {
	ret, err := store.Stat(repoID, blockID)
	return ret, err
}

================================================
FILE: fileserver/blockmgr/blockmgr_test.go
================================================

package blockmgr

import (
	"bytes"
	"fmt"
	"os"
	"path"
	"testing"
)

const (
	blockID         = "0401fc662e3bc87a41f299a907c056aaf8322a27"
	repoID          = "b1f2ad61-9164-418a-a47f-ab805dbd5694"
	seafileConfPath = "/tmp/conf"
	seafileDataDir  = "/tmp/conf/seafile-data"
	testFile        = "output.data"
)

// delFile removes the scratch file and the temporary conf tree created for the tests.
func delFile() error {
	err := os.Remove(testFile)
	if err != nil {
		return err
	}
	err = os.RemoveAll(seafileConfPath)
	if err != nil {
		return err
	}
	return nil
}

// createFile writes the fixture data: "hello world!\n" repeated 10 times
// (130 bytes — the size testBlockExists checks for).
func createFile() error {
	outputFile, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
	if err != nil {
		return err
	}
	defer outputFile.Close()
	outputString := "hello world!\n"
	for i := 0; i < 10; i++ {
		outputFile.WriteString(outputString)
	}
	return nil
}

func TestMain(m *testing.M) {
	err := createFile()
	if err != nil {
		fmt.Printf("Failed to create test file : %v\n", err)
		os.Exit(1)
	}
	code := m.Run()
	err = delFile()
	if err != nil {
		fmt.Printf("Failed to remove test file : %v\n", err)
		os.Exit(1)
	}
	os.Exit(code)
}

func testBlockRead(t *testing.T) {
	var buf bytes.Buffer
	err := Read(repoID, blockID, &buf)
	if err != nil {
t.Errorf("Failed to read block.\n") } } func testBlockWrite(t *testing.T) { inputFile, err := os.Open(testFile) if err != nil { t.Errorf("Failed to open test file : %v\n", err) } defer inputFile.Close() err = Write(repoID, blockID, inputFile) if err != nil { t.Errorf("Failed to write block.\n") } } func testBlockExists(t *testing.T) { ret := Exists(repoID, blockID) if !ret { t.Errorf("Block is not exist\n") } filePath := path.Join(seafileDataDir, "storage", "blocks", repoID, blockID[:2], blockID[2:]) fileInfo, _ := os.Stat(filePath) if fileInfo.Size() != 130 { t.Errorf("Block is exist, but the size of file is incorrect.\n") } } func TestBlock(t *testing.T) { Init(seafileConfPath, seafileDataDir) testBlockWrite(t) testBlockRead(t) testBlockExists(t) } ================================================ FILE: fileserver/commitmgr/commitmgr.go ================================================ // Package commitmgr manages commit objects. package commitmgr import ( "bytes" "crypto/sha1" "encoding/binary" "encoding/hex" "encoding/json" "fmt" "io" "time" "github.com/haiwen/seafile-server/fileserver/objstore" "github.com/haiwen/seafile-server/fileserver/utils" ) // Commit is a commit object type Commit struct { CommitID string `json:"commit_id"` RepoID string `json:"repo_id"` RootID string `json:"root_id"` CreatorName string `json:"creator_name,omitempty"` CreatorID string `json:"creator"` Desc string `json:"description"` Ctime int64 `json:"ctime"` ParentID String `json:"parent_id"` SecondParentID String `json:"second_parent_id"` RepoName string `json:"repo_name"` RepoDesc string `json:"repo_desc"` RepoCategory string `json:"repo_category"` DeviceName string `json:"device_name,omitempty"` ClientVersion string `json:"client_version,omitempty"` Encrypted string `json:"encrypted,omitempty"` EncVersion int `json:"enc_version,omitempty"` Magic string `json:"magic,omitempty"` RandomKey string `json:"key,omitempty"` Salt string `json:"salt,omitempty"` PwdHash string 
`json:"pwd_hash,omitempty"` PwdHashAlgo string `json:"pwd_hash_algo,omitempty"` PwdHashParams string `json:"pwd_hash_params,omitempty"` Version int `json:"version,omitempty"` Conflict int `json:"conflict,omitempty"` NewMerge int `json:"new_merge,omitempty"` Repaired int `json:"repaired,omitempty"` } var store *objstore.ObjectStore // Init initializes commit manager and creates underlying object store. func Init(seafileConfPath string, seafileDataDir string) { store = objstore.New(seafileConfPath, seafileDataDir, "commits") } // NewCommit initializes a Commit object. func NewCommit(repoID, parentID, newRoot, user, desc string) *Commit { commit := new(Commit) commit.RepoID = repoID commit.RootID = newRoot commit.Desc = desc commit.CreatorName = user commit.CreatorID = "0000000000000000000000000000000000000000" commit.Ctime = time.Now().Unix() commit.CommitID = computeCommitID(commit) if parentID != "" { commit.ParentID.SetValid(parentID) } return commit } func computeCommitID(commit *Commit) string { hash := sha1.New() hash.Write([]byte(commit.RootID)) hash.Write([]byte(commit.CreatorID)) hash.Write([]byte(commit.CreatorName)) hash.Write([]byte(commit.Desc)) tmpBuf := make([]byte, 8) binary.BigEndian.PutUint64(tmpBuf, uint64(commit.Ctime)) hash.Write(tmpBuf) checkSum := hash.Sum(nil) id := hex.EncodeToString(checkSum[:]) return id } // FromData reads from p and converts JSON-encoded data to commit. 
func (commit *Commit) FromData(p []byte) error {
	err := json.Unmarshal(p, commit)
	if err != nil {
		return err
	}

	// Validate the decoded fields so corrupt or truncated commit objects
	// are rejected before they reach callers.
	if !utils.IsValidUUID(commit.RepoID) {
		return fmt.Errorf("repo id %s is invalid", commit.RepoID)
	}
	if !utils.IsObjectIDValid(commit.RootID) {
		return fmt.Errorf("root id %s is invalid", commit.RootID)
	}
	// Creator id is a 40-hex-char SHA-1-style string (see NewCommit's
	// zero-filled default).
	if len(commit.CreatorID) != 40 {
		return fmt.Errorf("creator id %s is invalid", commit.CreatorID)
	}
	// Parent ids are nullable; only validate them when present.
	if commit.ParentID.Valid && !utils.IsObjectIDValid(commit.ParentID.String) {
		return fmt.Errorf("parent id %s is invalid", commit.ParentID.String)
	}
	if commit.SecondParentID.Valid && !utils.IsObjectIDValid(commit.SecondParentID.String) {
		return fmt.Errorf("second parent id %s is invalid", commit.SecondParentID.String)
	}

	return nil
}

// ToData converts commit to JSON-encoded data and writes to w.
func (commit *Commit) ToData(w io.Writer) error {
	jsonstr, err := json.Marshal(commit)
	if err != nil {
		return err
	}
	_, err = w.Write(jsonstr)
	if err != nil {
		return err
	}
	return nil
}

// ReadRaw reads data in binary format from storage backend.
func ReadRaw(repoID string, commitID string, w io.Writer) error {
	err := store.Read(repoID, commitID, w)
	if err != nil {
		return err
	}
	return nil
}

// WriteRaw writes data in binary format to storage backend.
func WriteRaw(repoID string, commitID string, r io.Reader) error {
	err := store.Write(repoID, commitID, r, false)
	if err != nil {
		return err
	}
	return nil
}

// Load commit from storage backend.
// The object is read into memory and then validated via FromData.
func Load(repoID string, commitID string) (*Commit, error) {
	var buf bytes.Buffer
	commit := new(Commit)
	err := ReadRaw(repoID, commitID, &buf)
	if err != nil {
		return nil, err
	}

	err = commit.FromData(buf.Bytes())
	if err != nil {
		return nil, err
	}

	return commit, nil
}

// Save commit to storage backend, serialized as JSON under the commit's
// own CommitID.
func Save(commit *Commit) error {
	var buf bytes.Buffer
	err := commit.ToData(&buf)
	if err != nil {
		return err
	}

	err = WriteRaw(commit.RepoID, commit.CommitID, &buf)
	if err != nil {
		return err
	}

	return err
}

// Exists checks commit if exists.
func Exists(repoID string, commitID string) (bool, error) { return store.Exists(repoID, commitID) } ================================================ FILE: fileserver/commitmgr/commitmgr_test.go ================================================ package commitmgr import ( "fmt" "os" "testing" "time" ) const ( commitID = "0401fc662e3bc87a41f299a907c056aaf8322a27" rootID = "6a1608dc2a1248838464e9b194800d35252e2ce3" repoID = "b1f2ad61-9164-418a-a47f-ab805dbd5694" seafileConfPath = "/tmp/conf" seafileDataDir = "/tmp/conf/seafile-data" ) func delFile() error { err := os.RemoveAll(seafileConfPath) if err != nil { return err } return nil } func TestMain(m *testing.M) { code := m.Run() err := delFile() if err != nil { fmt.Printf("Failed to remove test file : %v\n", err) os.Exit(1) } os.Exit(code) } func assertEqual(t *testing.T, a, b interface{}) { if a != b { t.Errorf("Not Equal.%t,%t", a, b) } } func TestCommit(t *testing.T) { Init(seafileConfPath, seafileDataDir) newCommit := new(Commit) newCommit.CommitID = commitID newCommit.RepoID = repoID newCommit.RootID = rootID newCommit.CreatorName = "seafile" newCommit.CreatorID = commitID newCommit.Desc = "This is a commit" newCommit.Ctime = time.Now().Unix() newCommit.ParentID.SetValid(commitID) newCommit.DeviceName = "Linux" err := Save(newCommit) if err != nil { t.Errorf("Failed to save commit.\n") } commit, err := Load(repoID, commitID) if err != nil { t.Errorf("Failed to load commit: %v.\n", err) } assertEqual(t, commit.CommitID, commitID) assertEqual(t, commit.RepoID, repoID) assertEqual(t, commit.CreatorName, "seafile") assertEqual(t, commit.CreatorID, commitID) assertEqual(t, commit.ParentID.String, commitID) } ================================================ FILE: fileserver/commitmgr/null.go ================================================ package commitmgr import ( "bytes" "database/sql" "encoding/json" "fmt" ) // nullBytes is a JSON null literal var nullBytes = []byte("null") // String is a nullable string. 
It supports SQL and JSON serialization. // It will marshal to null if null. Blank string input will be considered null. type String struct { sql.NullString } // StringFrom creates a new String that will never be blank. func StringFrom(s string) String { return NewString(s, true) } // StringFromPtr creates a new String that be null if s is nil. func StringFromPtr(s *string) String { if s == nil { return NewString("", false) } return NewString(*s, true) } // ValueOrZero returns the inner value if valid, otherwise zero. func (s String) ValueOrZero() string { if !s.Valid { return "" } return s.String } // NewString creates a new String func NewString(s string, valid bool) String { return String{ NullString: sql.NullString{ String: s, Valid: valid, }, } } // UnmarshalJSON implements json.Unmarshaler. // It supports string and null input. Blank string input does not produce a null String. func (s *String) UnmarshalJSON(data []byte) error { if bytes.Equal(data, nullBytes) { s.Valid = false return nil } if err := json.Unmarshal(data, &s.String); err != nil { return fmt.Errorf("null: couldn't unmarshal JSON: %w", err) } s.Valid = true return nil } // MarshalJSON implements json.Marshaler. // It will encode null if this String is null. func (s String) MarshalJSON() ([]byte, error) { if !s.Valid { return []byte("null"), nil } return json.Marshal(s.String) } // MarshalText implements encoding.TextMarshaler. // It will encode a blank string when this String is null. func (s String) MarshalText() ([]byte, error) { if !s.Valid { return []byte{}, nil } return []byte(s.String), nil } // UnmarshalText implements encoding.TextUnmarshaler. // It will unmarshal to a null String if the input is a blank string. func (s *String) UnmarshalText(text []byte) error { s.String = string(text) s.Valid = s.String != "" return nil } // SetValid changes this String's value and also sets it to be non-null. 
func (s *String) SetValid(v string) { s.String = v s.Valid = true } // Ptr returns a pointer to this String's value, or a nil pointer if this String is null. func (s String) Ptr() *string { if !s.Valid { return nil } return &s.String } // IsZero returns true for null strings, for potential future omitempty support. func (s String) IsZero() bool { return !s.Valid } // Equal returns true if both strings have the same value or are both null. func (s String) Equal(other String) bool { return s.Valid == other.Valid && (!s.Valid || s.String == other.String) } ================================================ FILE: fileserver/crypt.go ================================================ package main import ( "bytes" "crypto/aes" "crypto/cipher" ) type seafileCrypt struct { key []byte iv []byte version int } func (crypt *seafileCrypt) encrypt(input []byte) ([]byte, error) { key := crypt.key if crypt.version == 3 { key = to16Bytes(key) } block, err := aes.NewCipher(key) if err != nil { return nil, err } size := block.BlockSize() input = pkcs7Padding(input, size) out := make([]byte, len(input)) if crypt.version == 3 { for bs, be := 0, size; bs < len(input); bs, be = bs+size, be+size { block.Encrypt(out[bs:be], input[bs:be]) } return out, nil } blockMode := cipher.NewCBCEncrypter(block, crypt.iv) blockMode.CryptBlocks(out, input) return out, nil } func (crypt *seafileCrypt) decrypt(input []byte) ([]byte, error) { key := crypt.key if crypt.version == 3 { key = to16Bytes(key) } block, err := aes.NewCipher(key) if err != nil { return nil, err } out := make([]byte, len(input)) size := block.BlockSize() if crypt.version == 3 { // Encryption repo v3 uses AES_128_ecb mode to encrypt and decrypt, each block is encrypted and decrypted independently, // there is no relationship before and after, and iv is not required. 
for bs, be := 0, size; bs < len(input); bs, be = bs+size, be+size { block.Decrypt(out[bs:be], input[bs:be]) } out = pkcs7UnPadding(out) return out, nil } blockMode := cipher.NewCBCDecrypter(block, crypt.iv) blockMode.CryptBlocks(out, input) out = pkcs7UnPadding(out) return out, nil } func pkcs7Padding(p []byte, blockSize int) []byte { padding := blockSize - len(p)%blockSize padtext := bytes.Repeat([]byte{byte(padding)}, padding) return append(p, padtext...) } func pkcs7UnPadding(p []byte) []byte { length := len(p) paddLen := int(p[length-1]) return p[:(length - paddLen)] } func to16Bytes(input []byte) []byte { out := make([]byte, 16) copy(out, input) return out } ================================================ FILE: fileserver/diff/diff.go ================================================ package diff import ( "context" "fmt" "io" "path/filepath" "strings" "github.com/haiwen/seafile-server/fileserver/commitmgr" "github.com/haiwen/seafile-server/fileserver/fsmgr" "github.com/haiwen/seafile-server/fileserver/repomgr" ) // Empty value of sha1 const ( EmptySha1 = "0000000000000000000000000000000000000000" ) type fileCB func(context.Context, string, []*fsmgr.SeafDirent, interface{}) error type dirCB func(context.Context, string, []*fsmgr.SeafDirent, interface{}, *bool) error type DiffOptions struct { FileCB fileCB DirCB dirCB RepoID string Ctx context.Context Data interface{} Reader io.ReadCloser } type diffData struct { foldDirDiff bool results *[]*DiffEntry } func DiffTrees(roots []string, opt *DiffOptions) error { reader := fsmgr.GetOneZlibReader() defer fsmgr.ReturnOneZlibReader(reader) opt.Reader = reader n := len(roots) if n != 2 && n != 3 { err := fmt.Errorf("the number of commit trees is illegal") return err } trees := make([]*fsmgr.SeafDir, n) for i := 0; i < n; i++ { root, err := fsmgr.GetSeafdirWithZlibReader(opt.RepoID, roots[i], opt.Reader) if err != nil { err := fmt.Errorf("Failed to find dir %s:%s", opt.RepoID, roots[i]) return err } trees[i] = root } 
return diffTreesRecursive(trees, "", opt) } func diffTreesRecursive(trees []*fsmgr.SeafDir, baseDir string, opt *DiffOptions) error { n := len(trees) ptrs := make([][]*fsmgr.SeafDirent, 3) for i := 0; i < n; i++ { if trees[i] != nil { ptrs[i] = trees[i].Entries } else { ptrs[i] = nil } } var firstName string var done bool var offset = make([]int, n) for { dents := make([]*fsmgr.SeafDirent, 3) firstName = "" done = true for i := 0; i < n; i++ { if len(ptrs[i]) > offset[i] { done = false dent := ptrs[i][offset[i]] if firstName == "" { firstName = dent.Name } else if strings.Compare(dent.Name, firstName) > 0 { firstName = dent.Name } } } if done { break } for i := 0; i < n; i++ { if len(ptrs[i]) > offset[i] { dent := ptrs[i][offset[i]] if firstName == dent.Name { dents[i] = dent offset[i]++ } } } if n == 2 && dents[0] != nil && dents[1] != nil && direntSame(dents[0], dents[1]) { continue } if n == 3 && dents[0] != nil && dents[1] != nil && dents[2] != nil && direntSame(dents[0], dents[1]) && direntSame(dents[0], dents[2]) { continue } if err := diffFiles(baseDir, dents, opt); err != nil { return err } if err := diffDirectories(baseDir, dents, opt); err != nil { return err } } return nil } func diffFiles(baseDir string, dents []*fsmgr.SeafDirent, opt *DiffOptions) error { n := len(dents) var nFiles int files := make([]*fsmgr.SeafDirent, 3) for i := 0; i < n; i++ { if dents[i] != nil && fsmgr.IsRegular(dents[i].Mode) { files[i] = dents[i] nFiles++ } } if nFiles == 0 { return nil } return opt.FileCB(opt.Ctx, baseDir, files, opt.Data) } func diffDirectories(baseDir string, dents []*fsmgr.SeafDirent, opt *DiffOptions) error { n := len(dents) dirs := make([]*fsmgr.SeafDirent, 3) subDirs := make([]*fsmgr.SeafDir, 3) var nDirs int for i := 0; i < n; i++ { if dents[i] != nil && fsmgr.IsDir(dents[i].Mode) { dirs[i] = dents[i] nDirs++ } } if nDirs == 0 { return nil } recurse := true err := opt.DirCB(opt.Ctx, baseDir, dirs, opt.Data, &recurse) if err != nil { err := 
fmt.Errorf("failed to call dir callback: %w", err) return err } if !recurse { return nil } var dirName string for i := 0; i < n; i++ { if dents[i] != nil && fsmgr.IsDir(dents[i].Mode) { dir, err := fsmgr.GetSeafdirWithZlibReader(opt.RepoID, dents[i].ID, opt.Reader) if err != nil { err := fmt.Errorf("Failed to find dir %s:%s", opt.RepoID, dents[i].ID) return err } subDirs[i] = dir dirName = dents[i].Name } } newBaseDir := baseDir + dirName + "/" return diffTreesRecursive(subDirs, newBaseDir, opt) } func direntSame(dentA, dentB *fsmgr.SeafDirent) bool { return dentA.ID == dentB.ID && dentA.Mode == dentB.Mode && dentA.Mtime == dentB.Mtime } // Diff type and diff status. const ( DiffTypeCommits = 'C' /* diff between two commits*/ DiffStatusAdded = 'A' DiffStatusDeleted = 'D' DiffStatusModified = 'M' DiffStatusRenamed = 'R' DiffStatusUnmerged = 'U' DiffStatusDirAdded = 'B' DiffStatusDirDeleted = 'C' DiffStatusDirRenamed = 'E' ) type DiffEntry struct { DiffType rune Status rune Sha1 string Name string NewName string Size int64 OriginSize int64 } func diffEntryNewFromDirent(diffType, status rune, dent *fsmgr.SeafDirent, baseDir string) *DiffEntry { de := new(DiffEntry) de.Sha1 = dent.ID de.DiffType = diffType de.Status = status de.Size = dent.Size de.Name = filepath.Join(baseDir, dent.Name) return de } func diffEntryNew(diffType, status rune, dirID, name string) *DiffEntry { de := new(DiffEntry) de.DiffType = diffType de.Status = status de.Sha1 = dirID de.Name = name return de } func DiffMergeRoots(storeID, mergedRoot, p1Root, p2Root string, results *[]*DiffEntry, foldDirDiff bool) error { roots := []string{mergedRoot, p1Root, p2Root} opt := new(DiffOptions) opt.RepoID = storeID opt.FileCB = threewayDiffFiles opt.DirCB = threewayDiffDirs opt.Data = diffData{foldDirDiff, results} err := DiffTrees(roots, opt) if err != nil { err := fmt.Errorf("failed to diff trees: %v", err) return err } diffResolveRenames(results) return nil } func threewayDiffFiles(ctx context.Context, 
baseDir string, dents []*fsmgr.SeafDirent, optData interface{}) error { m := dents[0] p1 := dents[1] p2 := dents[2] data, ok := optData.(diffData) if !ok { err := fmt.Errorf("failed to assert diff data") return err } results := data.results if m != nil && p1 != nil && p2 != nil { if !direntSame(m, p1) && !direntSame(m, p2) { de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusModified, m, baseDir) *results = append(*results, de) } } else if m == nil && p1 != nil && p2 != nil { de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusDeleted, p1, baseDir) *results = append(*results, de) } else if m != nil && p1 == nil && p2 != nil { if !direntSame(m, p2) { de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusModified, m, baseDir) *results = append(*results, de) } } else if m != nil && p1 != nil && p2 == nil { if !direntSame(m, p1) { de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusModified, m, baseDir) *results = append(*results, de) } } else if m != nil && p1 == nil && p2 == nil { de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusAdded, m, baseDir) *results = append(*results, de) } return nil } func threewayDiffDirs(ctx context.Context, baseDir string, dents []*fsmgr.SeafDirent, optData interface{}, recurse *bool) error { *recurse = true return nil } func DiffCommitRoots(storeID, p1Root, p2Root string, results *[]*DiffEntry, foldDirDiff bool) error { roots := []string{p1Root, p2Root} opt := new(DiffOptions) opt.RepoID = storeID opt.FileCB = twowayDiffFiles opt.DirCB = twowayDiffDirs opt.Data = diffData{foldDirDiff, results} err := DiffTrees(roots, opt) if err != nil { err := fmt.Errorf("failed to diff trees: %v", err) return err } diffResolveRenames(results) return nil } func DiffCommits(commit1, commit2 *commitmgr.Commit, results *[]*DiffEntry, foldDirDiff bool) error { repo := repomgr.Get(commit1.RepoID) if repo == nil { err := fmt.Errorf("failed to get repo %s", commit1.RepoID) return err } roots := []string{commit1.RootID, commit2.RootID} opt 
:= new(DiffOptions) opt.RepoID = repo.StoreID opt.FileCB = twowayDiffFiles opt.DirCB = twowayDiffDirs opt.Data = diffData{foldDirDiff, results} err := DiffTrees(roots, opt) if err != nil { err := fmt.Errorf("failed to diff trees: %v", err) return err } diffResolveRenames(results) return nil } func twowayDiffFiles(ctx context.Context, baseDir string, dents []*fsmgr.SeafDirent, optData interface{}) error { p1 := dents[0] p2 := dents[1] data, ok := optData.(diffData) if !ok { err := fmt.Errorf("failed to assert diff data") return err } results := data.results if p1 == nil { de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusAdded, p2, baseDir) *results = append(*results, de) return nil } if p2 == nil { de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusDeleted, p1, baseDir) *results = append(*results, de) return nil } if !direntSame(p1, p2) { de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusModified, p2, baseDir) de.OriginSize = p1.Size *results = append(*results, de) } return nil } func twowayDiffDirs(ctx context.Context, baseDir string, dents []*fsmgr.SeafDirent, optData interface{}, recurse *bool) error { p1 := dents[0] p2 := dents[1] data, ok := optData.(diffData) if !ok { err := fmt.Errorf("failed to assert diff data") return err } results := data.results if p1 == nil { if p2.ID == EmptySha1 || data.foldDirDiff { de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusDirAdded, p2, baseDir) *results = append(*results, de) *recurse = false } else { *recurse = true } return nil } if p2 == nil { de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusDirDeleted, p1, baseDir) *results = append(*results, de) if data.foldDirDiff { *recurse = false } else { *recurse = true } } return nil } func diffResolveRenames(des *[]*DiffEntry) error { var deletedEmptyCount, deletedEmptyDirCount, addedEmptyCount, addedEmptyDirCount int for _, de := range *des { if de.Sha1 == EmptySha1 { if de.Status == DiffStatusDeleted { deletedEmptyCount++ } if de.Status == 
DiffStatusDirDeleted { deletedEmptyDirCount++ } if de.Status == DiffStatusAdded { addedEmptyCount++ } if de.Status == DiffStatusDirAdded { addedEmptyDirCount++ } } } deletedFiles := make(map[string]*DiffEntry) deletedDirs := make(map[string]*DiffEntry) var results []*DiffEntry var added []*DiffEntry checkEmptyDir := (deletedEmptyDirCount == 1 && addedEmptyDirCount == 1) checkEmptyFile := (deletedEmptyCount == 1 && addedEmptyCount == 1) for _, de := range *des { if de.Status == DiffStatusDeleted { if de.Sha1 == EmptySha1 && !checkEmptyFile { results = append(results, de) continue } if _, ok := deletedFiles[de.Sha1]; ok { results = append(results, de) continue } deletedFiles[de.Sha1] = de } if de.Status == DiffStatusDirDeleted { if de.Sha1 == EmptySha1 && !checkEmptyDir { results = append(results, de) continue } if _, ok := deletedDirs[de.Sha1]; ok { results = append(results, de) continue } deletedDirs[de.Sha1] = de } if de.Status == DiffStatusAdded { if de.Sha1 == EmptySha1 && !checkEmptyFile { results = append(results, de) continue } added = append(added, de) } if de.Status == DiffStatusDirAdded { if de.Sha1 == EmptySha1 && !checkEmptyDir { results = append(results, de) continue } added = append(added, de) } if de.Status == DiffStatusModified { results = append(results, de) } } for _, de := range added { var deAdd, deDel, deRename *DiffEntry var renameStatus rune deAdd = de if deAdd.Status == DiffStatusAdded { deTmp, ok := deletedFiles[de.Sha1] if !ok { results = append(results, deAdd) continue } deDel = deTmp } else { deTmp, ok := deletedDirs[de.Sha1] if !ok { results = append(results, deAdd) continue } deDel = deTmp } if deAdd.Status == DiffStatusDirAdded { renameStatus = DiffStatusDirRenamed } else { renameStatus = DiffStatusRenamed } deRename = diffEntryNew(deDel.DiffType, renameStatus, deDel.Sha1, deDel.Name) deRename.NewName = de.Name results = append(results, deRename) if deDel.Status == DiffStatusDirDeleted { delete(deletedDirs, deAdd.Sha1) } else { 
delete(deletedFiles, deAdd.Sha1) } } for _, de := range deletedFiles { results = append(results, de) } for _, de := range deletedDirs { results = append(results, de) } *des = results return nil } func DiffResultsToDesc(results []*DiffEntry) string { var nAddMod, nRemoved, nRenamed int var nNewDir, nRemovedDir int var addModFile, removedFile string var renamedFile string var newDir, removedDir string var desc string if results == nil { return "" } for _, de := range results { switch de.Status { case DiffStatusAdded: if nAddMod == 0 { addModFile = filepath.Base(de.Name) } nAddMod++ case DiffStatusDeleted: if nRemoved == 0 { removedFile = filepath.Base(de.Name) } nRemoved++ case DiffStatusRenamed: if nRenamed == 0 { renamedFile = filepath.Base(de.Name) } nRenamed++ case DiffStatusModified: if nAddMod == 0 { addModFile = filepath.Base(de.Name) } nAddMod++ case DiffStatusDirAdded: if nNewDir == 0 { newDir = filepath.Base(de.Name) } nNewDir++ case DiffStatusDirDeleted: if nRemovedDir == 0 { removedDir = filepath.Base(de.Name) } nRemovedDir++ } } if nAddMod == 1 { desc = fmt.Sprintf("Added or modified \"%s\".\n", addModFile) } else if nAddMod > 1 { desc = fmt.Sprintf("Added or modified \"%s\" and %d more files.\n", addModFile, nAddMod-1) } if nRemoved == 1 { desc += fmt.Sprintf("Deleted \"%s\".\n", removedFile) } else if nRemoved > 1 { desc += fmt.Sprintf("Deleted \"%s\" and %d more files.\n", removedFile, nRemoved-1) } if nRenamed == 1 { desc += fmt.Sprintf("Renamed \"%s\".\n", renamedFile) } else if nRenamed > 1 { desc += fmt.Sprintf("Renamed \"%s\" and %d more files.\n", renamedFile, nRenamed-1) } if nNewDir == 1 { desc += fmt.Sprintf("Added directory \"%s\".\n", newDir) } else if nNewDir > 1 { desc += fmt.Sprintf("Added \"%s\" and %d more directories.\n", newDir, nNewDir-1) } if nRemovedDir == 1 { desc += fmt.Sprintf("Removed directory \"%s\".\n", removedDir) } else if nRemovedDir > 1 { desc += fmt.Sprintf("Removed \"%s\" and %d more directories.\n", removedDir, 
nRemovedDir-1) } return desc } ================================================ FILE: fileserver/diff/diff_test.go ================================================ package diff import ( "context" "fmt" "os" "syscall" "testing" "github.com/haiwen/seafile-server/fileserver/fsmgr" ) const ( emptySHA1 = "0000000000000000000000000000000000000000" diffTestSeafileConfPath = "/tmp/conf" diffTestSeafileDataDir = "/tmp/conf/seafile-data" diffTestRepoID = "0d18a711-c988-4f7b-960c-211b34705ce3" ) var diffTestTree1 string var diffTestTree2 string var diffTestTree3 string var diffTestTree4 string var diffTestFileID string var diffTestDirID1 string var diffTestDirID2 string /* test directory structure: tree1 |-- tree2 |--file tree3 |--dir tree4 |--dir |-- file */ func TestDiffTrees(t *testing.T) { fsmgr.Init(diffTestSeafileConfPath, diffTestSeafileDataDir, 2<<30) err := diffTestCreateTestDir() if err != nil { fmt.Printf("failed to create test dir: %v", err) os.Exit(1) } t.Run("test1", testDiffTrees1) t.Run("test2", testDiffTrees2) t.Run("test3", testDiffTrees3) t.Run("test4", testDiffTrees4) t.Run("test5", testDiffTrees5) err = diffTestDelFile() if err != nil { fmt.Printf("failed to remove test file : %v", err) } } func diffTestCreateTestDir() error { modeDir := uint32(syscall.S_IFDIR | 0644) modeFile := uint32(syscall.S_IFREG | 0644) dir1, err := diffTestCreateSeafdir(nil) if err != nil { err := fmt.Errorf("failed to get seafdir: %v", err) return err } diffTestTree1 = dir1 file1, err := fsmgr.NewSeafile(1, 1, nil) if err != nil { err := fmt.Errorf("failed to new seafile: %v", err) return err } diffTestFileID = file1.FileID err = fsmgr.SaveSeafile(diffTestRepoID, file1) if err != nil { err := fmt.Errorf("failed to save seafile: %v", err) return err } dent1 := fsmgr.SeafDirent{ID: file1.FileID, Name: "file", Mode: modeFile, Size: 1} dir2, err := diffTestCreateSeafdir([]*fsmgr.SeafDirent{&dent1}) if err != nil { err := fmt.Errorf("failed to get seafdir: %v", err) return err } 
diffTestTree2 = dir2 dent2 := fsmgr.SeafDirent{ID: dir1, Name: "dir", Mode: modeDir} diffTestDirID1 = dir1 dir3, err := diffTestCreateSeafdir([]*fsmgr.SeafDirent{&dent2}) if err != nil { err := fmt.Errorf("failed to get seafdir: %v", err) return err } diffTestTree3 = dir3 dent3 := fsmgr.SeafDirent{ID: dir2, Name: "dir", Mode: modeDir} diffTestDirID2 = dir2 dir4, err := diffTestCreateSeafdir([]*fsmgr.SeafDirent{&dent3}) if err != nil { err := fmt.Errorf("failed to get seafdir: %v", err) return err } diffTestTree4 = dir4 return nil } func testDiffTrees1(t *testing.T) { var results []interface{} opt := &DiffOptions{ FileCB: diffTestFileCB, DirCB: diffTestDirCB, RepoID: diffTestRepoID} opt.Data = &results DiffTrees([]string{diffTestTree2, diffTestTree1}, opt) if len(results) != 1 { t.Errorf("data length is %d not 1", len(results)) } var ret = make([]string, len(results)) for k, v := range results { ret[k] = fmt.Sprintf("%s", v) } if ret[0] != diffTestFileID { t.Errorf("result %s != %s", ret[0], diffTestFileID) } } func testDiffTrees2(t *testing.T) { var results []interface{} opt := &DiffOptions{ FileCB: diffTestFileCB, DirCB: diffTestDirCB, RepoID: diffTestRepoID} opt.Data = &results DiffTrees([]string{diffTestTree3, diffTestTree1}, opt) if len(results) != 1 { t.Errorf("data length is %d not 1", len(results)) } var ret = make([]string, len(results)) for k, v := range results { ret[k] = fmt.Sprintf("%s", v) } if ret[0] != diffTestDirID1 { t.Errorf("result %s != %s", ret[0], diffTestDirID1) } } func testDiffTrees3(t *testing.T) { var results []interface{} opt := &DiffOptions{ FileCB: diffTestFileCB, DirCB: diffTestDirCB, RepoID: diffTestRepoID} opt.Data = &results DiffTrees([]string{diffTestTree4, diffTestTree1}, opt) if len(results) != 2 { t.Errorf("data length is %d not 1", len(results)) } var ret = make([]string, len(results)) for k, v := range results { ret[k] = fmt.Sprintf("%s", v) } if ret[0] != diffTestDirID2 { t.Errorf("result %s != %s", ret[0], diffTestDirID2) } 
if ret[1] != diffTestFileID { t.Errorf("result %s != %s", ret[1], diffTestFileID) } } func testDiffTrees4(t *testing.T) { var results []interface{} opt := &DiffOptions{ FileCB: diffTestFileCB, DirCB: diffTestDirCB, RepoID: diffTestRepoID} opt.Data = &results DiffTrees([]string{diffTestTree4, diffTestTree3}, opt) if len(results) != 2 { t.Errorf("data length is %d not 1", len(results)) } var ret = make([]string, len(results)) for k, v := range results { ret[k] = fmt.Sprintf("%s", v) } if ret[0] != diffTestDirID2 { t.Errorf("result %s != %s", ret[0], diffTestDirID2) } if ret[1] != diffTestFileID { t.Errorf("result %s != %s", ret[1], diffTestFileID) } } func testDiffTrees5(t *testing.T) { var results []interface{} opt := &DiffOptions{ FileCB: diffTestFileCB, DirCB: diffTestDirCB, RepoID: diffTestRepoID} opt.Data = &results DiffTrees([]string{diffTestTree3, diffTestTree2}, opt) if len(results) != 1 { t.Errorf("data length is %d not 1", len(results)) } var ret = make([]string, len(results)) for k, v := range results { ret[k] = fmt.Sprintf("%s", v) } if ret[0] != diffTestDirID1 { t.Errorf("result %s != %s", ret[0], diffTestDirID1) } } func diffTestCreateSeafdir(dents []*fsmgr.SeafDirent) (string, error) { seafdir, err := fsmgr.NewSeafdir(1, dents) if err != nil { return "", err } err = fsmgr.SaveSeafdir(diffTestRepoID, seafdir) if err != nil { return "", err } return seafdir.DirID, nil } func diffTestDelFile() error { err := os.RemoveAll(diffTestSeafileConfPath) if err != nil { return err } return nil } func diffTestFileCB(ctx context.Context, baseDir string, files []*fsmgr.SeafDirent, data interface{}) error { file1 := files[0] file2 := files[1] results, ok := data.(*[]interface{}) if !ok { err := fmt.Errorf("failed to assert results") return err } if file1 != nil && (file2 == nil || file1.ID != file2.ID) { *results = append(*results, file1.ID) } return nil } func diffTestDirCB(ctx context.Context, baseDir string, dirs []*fsmgr.SeafDirent, data interface{}, recurse 
*bool) error { dir1 := dirs[0] dir2 := dirs[1] results, ok := data.(*[]interface{}) if !ok { err := fmt.Errorf("failed to assert results") return err } if dir1 != nil && (dir2 == nil || dir1.ID != dir2.ID) { *results = append(*results, dir1.ID) } return nil } ================================================ FILE: fileserver/fileop.go ================================================ package main import ( "archive/zip" "bytes" "context" "crypto/sha1" "encoding/hex" "encoding/json" "errors" "fmt" "io" "mime" "mime/multipart" "net" "net/http" "net/url" "os" "path/filepath" "runtime/debug" "strconv" "strings" "sync" "time" "unicode/utf8" "database/sql" "math/rand" "sort" "syscall" "github.com/gorilla/mux" "github.com/haiwen/seafile-server/fileserver/blockmgr" "github.com/haiwen/seafile-server/fileserver/commitmgr" "github.com/haiwen/seafile-server/fileserver/diff" "github.com/haiwen/seafile-server/fileserver/fsmgr" "github.com/haiwen/seafile-server/fileserver/option" "github.com/haiwen/seafile-server/fileserver/repomgr" "github.com/haiwen/seafile-server/fileserver/utils" "github.com/haiwen/seafile-server/fileserver/workerpool" log "github.com/sirupsen/logrus" "golang.org/x/text/unicode/norm" ) const ( cacheBlockMapThreshold = 1 << 23 blockMapCacheExpiretime int64 = 3600 * 24 fileopCleaningIntervalSec = 3600 duplicateNamesCount = 1000 ) var ( blockMapCacheTable sync.Map indexFilePool *workerpool.WorkPool ) // Dirents is an alias for slice of SeafDirent. 
type Dirents []*fsmgr.SeafDirent func (d Dirents) Less(i, j int) bool { return d[i].Name > d[j].Name } func (d Dirents) Swap(i, j int) { d[i], d[j] = d[j], d[i] } func (d Dirents) Len() int { return len(d) } func fileopInit() { ticker := time.NewTicker(time.Second * fileopCleaningIntervalSec) go RecoverWrapper(func() { for range ticker.C { removeFileopExpireCache() } }) indexFilePool = workerpool.CreateWorkerPool(indexFileWorker, int(option.MaxIndexingFiles)) } func initUpload() { objDir := filepath.Join(dataDir, "httptemp", "cluster-shared") os.MkdirAll(objDir, os.ModePerm) } // contentType = "application/octet-stream" func parseContentType(fileName string) string { var contentType string parts := strings.Split(fileName, ".") if len(parts) >= 2 { suffix := parts[len(parts)-1] suffix = strings.ToLower(suffix) switch suffix { case "txt": contentType = "text/plain" case "doc": contentType = "application/vnd.ms-word" case "docx": contentType = "application/vnd.openxmlformats-officedocument.wordprocessingml.document" case "ppt": contentType = "application/vnd.ms-powerpoint" case "xls": contentType = "application/vnd.ms-excel" case "xlsx": contentType = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" case "pdf": contentType = "application/pdf" case "zip": contentType = "application/zip" case "mp3": contentType = "audio/mp3" case "mpeg": contentType = "video/mpeg" case "mp4": contentType = "video/mp4" case "ogv": contentType = "video/ogg" case "mov": contentType = "video/mp4" case "webm": contentType = "video/webm" case "mkv": contentType = "video/x-matroska" case "jpeg", "JPEG", "jpg", "JPG": contentType = "image/jpeg" case "png", "PNG": contentType = "image/png" case "gif", "GIF": contentType = "image/gif" case "svg", "SVG": contentType = "image/svg+xml" case "heic": contentType = "image/heic" case "ico": contentType = "image/x-icon" case "bmp": contentType = "image/bmp" case "tif", "tiff": contentType = "image/tiff" case "psd": contentType = 
"image/vnd.adobe.photoshop" case "webp": contentType = "image/webp" case "jfif": contentType = "image/jpeg" } } return contentType } func accessCB(rsp http.ResponseWriter, r *http.Request) *appError { parts := strings.Split(r.URL.Path[1:], "/") if len(parts) < 3 { msg := "Invalid URL" return &appError{nil, msg, http.StatusBadRequest} } token := parts[1] fileName := parts[2] accessInfo, err := parseWebaccessInfo(token) if err != nil { return err } repoID := accessInfo.repoID op := accessInfo.op user := accessInfo.user objID := accessInfo.objID if op != "view" && op != "download" && op != "download-link" { msg := "Operation does not match access token." return &appError{nil, msg, http.StatusForbidden} } if _, ok := r.Header["If-Modified-Since"]; ok { return &appError{nil, "", http.StatusNotModified} } now := time.Now() rsp.Header().Set("ETag", objID) rsp.Header().Set("Last-Modified", now.Format("Mon, 2 Jan 2006 15:04:05 GMT")) rsp.Header().Set("Cache-Control", "max-age=3600") ranges := r.Header["Range"] byteRanges := strings.Join(ranges, "") repo := repomgr.Get(repoID) if repo == nil { msg := "Bad repo id" return &appError{nil, msg, http.StatusBadRequest} } var cryptKey *seafileCrypt if repo.IsEncrypted { key, err := parseCryptKey(rsp, repoID, user, repo.EncVersion) if err != nil { return err } cryptKey = key } exists, _ := fsmgr.Exists(repo.StoreID, objID) if !exists { msg := "Invalid file id" return &appError{nil, msg, http.StatusBadRequest} } if !repo.IsEncrypted && len(byteRanges) != 0 { if err := doFileRange(rsp, r, repo, objID, fileName, op, byteRanges, user); err != nil { return err } } else if err := doFile(rsp, r, repo, objID, fileName, op, cryptKey, user); err != nil { return err } return nil } func parseCryptKey(rsp http.ResponseWriter, repoID string, user string, version int) (*seafileCrypt, *appError) { key, err := rpcclient.Call("seafile_get_decrypt_key", repoID, user) if err != nil { errMessage := "Repo is encrypted. 
Please provide password to view it." return nil, &appError{nil, errMessage, http.StatusBadRequest} } cryptKey, ok := key.(map[string]interface{}) if !ok { err := fmt.Errorf("failed to assert crypt key") return nil, &appError{err, "", http.StatusInternalServerError} } seafileKey := new(seafileCrypt) seafileKey.version = version if cryptKey != nil { key, ok := cryptKey["key"].(string) if !ok { err := fmt.Errorf("failed to parse crypt key") return nil, &appError{err, "", http.StatusInternalServerError} } iv, ok := cryptKey["iv"].(string) if !ok { err := fmt.Errorf("failed to parse crypt iv") return nil, &appError{err, "", http.StatusInternalServerError} } seafileKey.key, err = hex.DecodeString(key) if err != nil { err := fmt.Errorf("failed to decode key: %v", err) return nil, &appError{err, "", http.StatusInternalServerError} } seafileKey.iv, err = hex.DecodeString(iv) if err != nil { err := fmt.Errorf("failed to decode iv: %v", err) return nil, &appError{err, "", http.StatusInternalServerError} } } return seafileKey, nil } func accessV2CB(rsp http.ResponseWriter, r *http.Request) *appError { vars := mux.Vars(r) repoID := vars["repoid"] filePath := vars["filepath"] if filePath == "" { msg := "No file path\n" return &appError{nil, msg, http.StatusBadRequest} } rpath := getCanonPath(filePath) fileName := filepath.Base(rpath) op := r.URL.Query().Get("op") if op != "view" && op != "download" { msg := "Operation is neither view or download\n" return &appError{nil, msg, http.StatusBadRequest} } token := utils.GetAuthorizationToken(r.Header) cookie := r.Header.Get("Cookie") if token == "" && cookie == "" { msg := "Both token and cookie are not set\n" return &appError{nil, msg, http.StatusBadRequest} } ipAddr := getClientIPAddr(r) userAgent := r.Header.Get("User-Agent") user, appErr := checkFileAccess(repoID, token, cookie, filePath, "download", ipAddr, userAgent) if appErr != nil { return appErr } repo := repomgr.Get(repoID) if repo == nil { msg := "Bad repo id" return 
&appError{nil, msg, http.StatusBadRequest} } fileID, _, err := fsmgr.GetObjIDByPath(repo.StoreID, repo.RootID, rpath) if err != nil { msg := "Invalid file_path\n" return &appError{nil, msg, http.StatusBadRequest} } etag := r.Header.Get("If-None-Match") if etag == fileID { return &appError{nil, "", http.StatusNotModified} } rsp.Header().Set("ETag", fileID) rsp.Header().Set("Cache-Control", "private, no-cache") ranges := r.Header["Range"] byteRanges := strings.Join(ranges, "") var cryptKey *seafileCrypt if repo.IsEncrypted { key, err := parseCryptKey(rsp, repoID, user, repo.EncVersion) if err != nil { return err } cryptKey = key } exists, _ := fsmgr.Exists(repo.StoreID, fileID) if !exists { msg := "Invalid file id" return &appError{nil, msg, http.StatusBadRequest} } if !repo.IsEncrypted && len(byteRanges) != 0 { if err := doFileRange(rsp, r, repo, fileID, fileName, op, byteRanges, user); err != nil { return err } } else if err := doFile(rsp, r, repo, fileID, fileName, op, cryptKey, user); err != nil { return err } return nil } type UserInfo struct { User string `json:"user"` } func checkFileAccess(repoID, token, cookie, filePath, op, ipAddr, userAgent string) (string, *appError) { tokenString, err := utils.GenSeahubJWTToken() if err != nil { err := fmt.Errorf("failed to sign jwt token: %v", err) return "", &appError{err, "", http.StatusInternalServerError} } url := fmt.Sprintf("%s/repos/%s/check-access/", option.SeahubURL, repoID) header := map[string][]string{ "Authorization": {"Token " + tokenString}, } if cookie != "" { header["Cookie"] = []string{cookie} } req := make(map[string]string) req["op"] = op req["path"] = filePath if token != "" { req["token"] = token } if ipAddr != "" { req["ip_addr"] = ipAddr } if userAgent != "" { req["user_agent"] = userAgent } msg, err := json.Marshal(req) if err != nil { err := fmt.Errorf("failed to encode access token: %v", err) return "", &appError{err, "", http.StatusInternalServerError} } status, body, err := 
utils.HttpCommon("POST", url, header, bytes.NewReader(msg)) if err != nil { if status != http.StatusInternalServerError { return "", &appError{nil, string(body), status} } else { err := fmt.Errorf("failed to get access token info: %v", err) return "", &appError{err, "", http.StatusInternalServerError} } } info := new(UserInfo) err = json.Unmarshal(body, &info) if err != nil { err := fmt.Errorf("failed to decode access token info: %v", err) return "", &appError{err, "", http.StatusInternalServerError} } return info.User, nil } func doFile(rsp http.ResponseWriter, r *http.Request, repo *repomgr.Repo, fileID string, fileName string, operation string, cryptKey *seafileCrypt, user string) *appError { file, err := fsmgr.GetSeafile(repo.StoreID, fileID) if err != nil { msg := "Failed to get seafile" return &appError{nil, msg, http.StatusBadRequest} } rsp.Header().Set("Access-Control-Allow-Origin", "*") fileType := parseContentType(fileName) if fileType == "image/svg+xml" { rsp.Header().Set("Content-Security-Policy", "sandbox") } setCommonHeaders(rsp, r, operation, fileName) //filesize string fileSize := fmt.Sprintf("%d", file.FileSize) rsp.Header().Set("Content-Length", fileSize) if r.Method == "HEAD" { rsp.WriteHeader(http.StatusOK) return nil } if file.FileSize == 0 { rsp.WriteHeader(http.StatusOK) return nil } if cryptKey != nil { for _, blkID := range file.BlkIDs { var buf bytes.Buffer blockmgr.Read(repo.StoreID, blkID, &buf) decoded, err := cryptKey.decrypt(buf.Bytes()) if err != nil { err := fmt.Errorf("failed to decrypt block %s: %v", blkID, err) return &appError{err, "", http.StatusInternalServerError} } _, err = rsp.Write(decoded) if err != nil { return nil } } return nil } for _, blkID := range file.BlkIDs { err := blockmgr.Read(repo.StoreID, blkID, rsp) if err != nil { if !isNetworkErr(err) { log.Errorf("failed to read block %s: %v", blkID, err) } return nil } } oper := "web-file-download" if operation == "download-link" { oper = "link-file-download" } 
sendStatisticMsg(repo.StoreID, user, oper, file.FileSize)

	return nil
}

// isNetworkErr reports whether err came from the network connection (e.g.
// the client disconnected) rather than from local storage.
func isNetworkErr(err error) bool {
	_, ok := err.(net.Error)
	return ok
}

// blockMap caches the sizes of a file's blocks so range requests on large
// files don't have to stat every block on every request.
type blockMap struct {
	blkSize    []uint64
	expireTime int64 // unix time after which this cache entry is stale
}

// doFileRange serves an HTTP byte-range request for an unencrypted file.
// It locates the block containing the range start, then streams block data
// until the requested span has been written.
//
// Fixes over the previous version: the download statistic is now also sent
// when the whole range fits in a single block (that fast path used to skip
// it), and the reported size is the full requested range instead of
// end-start+1 computed after `start` had been advanced during streaming.
func doFileRange(rsp http.ResponseWriter, r *http.Request, repo *repomgr.Repo, fileID string,
	fileName string, operation string, byteRanges string, user string) *appError {
	file, err := fsmgr.GetSeafile(repo.StoreID, fileID)
	if err != nil {
		msg := "Failed to get seafile"
		return &appError{nil, msg, http.StatusBadRequest}
	}

	if file.FileSize == 0 {
		rsp.WriteHeader(http.StatusOK)
		return nil
	}

	start, end, ok := parseRange(byteRanges, file.FileSize)
	if !ok {
		conRange := fmt.Sprintf("bytes */%d", file.FileSize)
		rsp.Header().Set("Content-Range", conRange)
		return &appError{nil, "", http.StatusRequestedRangeNotSatisfiable}
	}

	// Total number of bytes requested. `start` is mutated while streaming
	// below, so remember the full length for headers and statistics.
	rangeSize := end - start + 1

	rsp.Header().Set("Accept-Ranges", "bytes")

	fileType := parseContentType(fileName)
	if fileType == "image/svg+xml" {
		rsp.Header().Set("Content-Security-Policy", "sandbox")
	}

	setCommonHeaders(rsp, r, operation, fileName)

	conLen := fmt.Sprintf("%d", rangeSize)
	rsp.Header().Set("Content-Length", conLen)

	conRange := fmt.Sprintf("bytes %d-%d/%d", start, end, file.FileSize)
	rsp.Header().Set("Content-Range", conRange)

	rsp.WriteHeader(http.StatusPartialContent)

	// Collect per-block sizes; cached for large files because stat-ing
	// every block is expensive.
	var blkSize []uint64
	if file.FileSize > cacheBlockMapThreshold {
		if v, ok := blockMapCacheTable.Load(file.FileID); ok {
			if blkMap, ok := v.(*blockMap); ok {
				blkSize = blkMap.blkSize
			}
		}
		if len(blkSize) == 0 {
			for _, v := range file.BlkIDs {
				size, err := blockmgr.Stat(repo.StoreID, v)
				if err != nil {
					err := fmt.Errorf("failed to stat block %s : %v", v, err)
					return &appError{err, "", http.StatusInternalServerError}
				}
				blkSize = append(blkSize, uint64(size))
			}
			blockMapCacheTable.Store(file.FileID, &blockMap{blkSize, time.Now().Unix() + blockMapCacheExpiretime})
		}
	} else {
		for _, v := range file.BlkIDs {
			size, err := blockmgr.Stat(repo.StoreID, v)
			if err != nil {
				err := fmt.Errorf("failed to stat block %s : %v", v, err)
				return &appError{err, "", http.StatusInternalServerError}
			}
			blkSize = append(blkSize, uint64(size))
		}
	}

	oper := "web-file-download"
	if operation == "download-link" {
		oper = "link-file-download"
	}

	// Find the block containing `start` and the offset (pos) inside it.
	var off uint64
	var pos uint64
	var startBlock int
	for i, v := range blkSize {
		pos = start - off
		off += v
		if off > start {
			startBlock = i
			break
		}
	}

	// Read block from the start block and specified position
	var i int
	for ; i < len(file.BlkIDs); i++ {
		if i < startBlock {
			continue
		}
		blkID := file.BlkIDs[i]
		var buf bytes.Buffer
		if end-start+1 <= blkSize[i]-pos {
			// The whole range lives inside this single block.
			err := blockmgr.Read(repo.StoreID, blkID, &buf)
			if err != nil {
				if !isNetworkErr(err) {
					log.Errorf("failed to read block %s: %v", blkID, err)
				}
				return nil
			}
			recvBuf := buf.Bytes()
			rsp.Write(recvBuf[pos : pos+end-start+1])
			// Fix: this early-return path previously skipped the
			// download statistics message entirely.
			sendStatisticMsg(repo.StoreID, user, oper, rangeSize)
			return nil
		}
		err := blockmgr.Read(repo.StoreID, blkID, &buf)
		if err != nil {
			if !isNetworkErr(err) {
				log.Errorf("failed to read block %s: %v", blkID, err)
			}
			return nil
		}
		recvBuf := buf.Bytes()
		_, err = rsp.Write(recvBuf[pos:])
		if err != nil {
			return nil
		}
		// Advance `start` past the bytes already sent from this block.
		start += blkSize[i] - pos
		i++
		break
	}

	// Always read block from the remaining block and pos=0
	for ; i < len(file.BlkIDs); i++ {
		blkID := file.BlkIDs[i]
		var buf bytes.Buffer
		if end-start+1 <= blkSize[i] {
			// Last (possibly partial) block of the range.
			err := blockmgr.Read(repo.StoreID, blkID, &buf)
			if err != nil {
				if !isNetworkErr(err) {
					log.Errorf("failed to read block %s: %v", blkID, err)
				}
				return nil
			}
			recvBuf := buf.Bytes()
			_, err = rsp.Write(recvBuf[:end-start+1])
			if err != nil {
				return nil
			}
			break
		} else {
			err := blockmgr.Read(repo.StoreID, blkID, rsp)
			if err != nil {
				if !isNetworkErr(err) {
					log.Errorf("failed to read block %s: %v", blkID, err)
				}
				return nil
			}
			start += blkSize[i]
		}
	}

	// Fix: report the full requested range; the old end-start+1 used the
	// mutated `start` and under-counted the downloaded bytes.
	sendStatisticMsg(repo.StoreID, user, oper, rangeSize)
	return nil
}

// parseRange parses a single "bytes=..." Range header value into an
// inclusive [start, end] pair clamped to fileSize. Supported forms:
// "bytes=a-b", "bytes=a-" (to end of file) and the suffix form "bytes=-n"
// (final n bytes). Returns false for malformed or unsatisfiable ranges.
func parseRange(byteRanges string, fileSize uint64) (uint64, uint64, bool) {
	start := strings.Index(byteRanges, "=")
	end := strings.Index(byteRanges, "-")

	if end < 0 {
		return 0, 0, false
	}

	var startByte, endByte uint64

	if start+1 == end {
		// Suffix form "bytes=-n".
		retByte, err := strconv.ParseUint(byteRanges[end+1:], 10, 64)
		if err != nil || retByte == 0 {
			return 0, 0, false
		}
		startByte = fileSize - retByte
		endByte = fileSize - 1
	} else if end+1 == len(byteRanges) {
		// Open-ended form "bytes=a-".
		firstByte, err := strconv.ParseUint(byteRanges[start+1:end], 10, 64)
		if err != nil {
			return 0, 0, false
		}
		startByte = firstByte
		endByte = fileSize - 1
	} else {
		firstByte, err := strconv.ParseUint(byteRanges[start+1:end], 10, 64)
		if err != nil {
			return 0, 0, false
		}
		lastByte, err := strconv.ParseUint(byteRanges[end+1:], 10, 64)
		if err != nil {
			return 0, 0, false
		}
		if lastByte > fileSize-1 {
			lastByte = fileSize - 1
		}
		startByte = firstByte
		endByte = lastByte
	}

	if startByte > endByte {
		return 0, 0, false
	}

	return startByte, endByte, true
}

// setCommonHeaders sets Content-Type, Content-Disposition and sniffing
// headers shared by the download/inline-view code paths.
func setCommonHeaders(rsp http.ResponseWriter, r *http.Request, operation, fileName string) {
	fileType := parseContentType(fileName)
	if fileType != "" {
		var contentType string
		if strings.Contains(fileType, "text") {
			contentType = fileType + "; " + "charset=gbk"
		} else {
			contentType = fileType
		}
		rsp.Header().Set("Content-Type", contentType)
	} else {
		rsp.Header().Set("Content-Type", "application/octet-stream")
	}

	var contFileName string
	if operation == "download" || operation == "download-link" || operation == "downloadblks" {
		// Since the file name downloaded by safari will be garbled, we need to encode the filename.
		// Safari cannot parse unencoded utf8 characters.
contFileName = fmt.Sprintf("attachment;filename*=utf-8''%s;filename=\"%s\"", url.PathEscape(fileName), fileName)
	} else {
		contFileName = fmt.Sprintf("inline;filename*=utf-8''%s;filename=\"%s\"", url.PathEscape(fileName), fileName)
	}
	rsp.Header().Set("Content-Disposition", contFileName)

	// NOTE(review): parseContentType apparently returns "image/jpg" for jpeg
	// files (nonstandard spelling) — confirm before changing this check.
	if fileType != "image/jpg" {
		rsp.Header().Set("X-Content-Type-Options", "nosniff")
	}
}

// accessBlksCB handles /blks/ requests: it validates the access token and
// streams a single block of a file to the client.
func accessBlksCB(rsp http.ResponseWriter, r *http.Request) *appError {
	parts := strings.Split(r.URL.Path[1:], "/")
	if len(parts) < 3 {
		msg := "Invalid URL"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	token := parts[1]
	blkID := parts[2]

	accessInfo, err := parseWebaccessInfo(token)
	if err != nil {
		return err
	}
	repoID := accessInfo.repoID
	op := accessInfo.op
	user := accessInfo.user
	id := accessInfo.objID

	// Block content is immutable, so any cached copy is still valid.
	if _, ok := r.Header["If-Modified-Since"]; ok {
		return &appError{nil, "", http.StatusNotModified}
	}

	now := time.Now()
	rsp.Header().Set("Last-Modified", now.Format("Mon, 2 Jan 2006 15:04:05 GMT"))
	rsp.Header().Set("Cache-Control", "max-age=3600")

	repo := repomgr.Get(repoID)
	if repo == nil {
		msg := "Bad repo id"
		return &appError{nil, msg, http.StatusBadRequest}
	}

	exists, _ := fsmgr.Exists(repo.StoreID, id)
	if !exists {
		msg := "Invalid file id"
		return &appError{nil, msg, http.StatusBadRequest}
	}

	if op != "downloadblks" {
		msg := "Operation does not match access token"
		return &appError{nil, msg, http.StatusForbidden}
	}

	if err := doBlock(rsp, r, repo, id, user, blkID); err != nil {
		return err
	}

	return nil
}

// doBlock streams one block of file fileID, after verifying that the block
// actually belongs to that file.
func doBlock(rsp http.ResponseWriter, r *http.Request, repo *repomgr.Repo,
	fileID string, user string, blkID string) *appError {
	file, err := fsmgr.GetSeafile(repo.StoreID, fileID)
	if err != nil {
		msg := "Failed to get seafile"
		return &appError{nil, msg, http.StatusBadRequest}
	}

	// Reject block IDs that are not part of the file named by the token.
	var found bool
	for _, id := range file.BlkIDs {
		if id == blkID {
			found = true
			break
		}
	}
	if !found {
		rsp.WriteHeader(http.StatusBadRequest)
		return nil
	}

	exists := blockmgr.Exists(repo.StoreID, blkID)
	if !exists {
		rsp.WriteHeader(http.StatusBadRequest)
		return nil
	}

	rsp.Header().Set("Access-Control-Allow-Origin", "*")
	setCommonHeaders(rsp, r, "downloadblks", blkID)

	size, err := blockmgr.Stat(repo.StoreID, blkID)
	if err != nil {
		msg := "Failed to stat block"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	if size == 0 {
		rsp.WriteHeader(http.StatusOK)
		return nil
	}

	fileSize := fmt.Sprintf("%d", size)
	rsp.Header().Set("Content-Length", fileSize)

	err = blockmgr.Read(repo.StoreID, blkID, rsp)
	if err != nil {
		if !isNetworkErr(err) {
			log.Errorf("failed to read block %s: %v", blkID, err)
		}
	}

	sendStatisticMsg(repo.StoreID, user, "web-file-download", uint64(size))

	return nil
}

// accessZipCB handles zip-download requests for a directory or for multiple
// selected files.
func accessZipCB(rsp http.ResponseWriter, r *http.Request) *appError {
	parts := strings.Split(r.URL.Path[1:], "/")
	if len(parts) != 2 {
		msg := "Invalid URL"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	token := parts[1]

	accessInfo, err := parseWebaccessInfo(token)
	if err != nil {
		return err
	}
	repoID := accessInfo.repoID
	op := accessInfo.op
	user := accessInfo.user
	data := accessInfo.objID

	if op != "download-dir" && op != "download-dir-link" && op != "download-multi" && op != "download-multi-link" {
		msg := "Operation does not match access token"
		return &appError{nil, msg, http.StatusForbidden}
	}

	if _, ok := r.Header["If-Modified-Since"]; ok {
		return &appError{nil, "", http.StatusNotModified}
	}

	now := time.Now()
	rsp.Header().Set("Last-Modified", now.Format("Mon, 2 Jan 2006 15:04:05 GMT"))
	rsp.Header().Set("Cache-Control", "max-age=3600")

	if err := downloadZipFile(rsp, r, data, repoID, user, op); err != nil {
		return err
	}

	return nil
}

// downloadZipFile streams a zip archive of a directory ("download-dir*") or
// of a selected file list ("download-multi*"). `data` is the JSON payload
// stored in the access token.
func downloadZipFile(rsp http.ResponseWriter, r *http.Request, data, repoID, user, op string) *appError {
	repo := repomgr.Get(repoID)
	if repo == nil {
		msg := "Failed to get repo"
		return &appError{nil, msg, http.StatusBadRequest}
	}

	var cryptKey *seafileCrypt
	if repo.IsEncrypted {
		key, err := parseCryptKey(rsp, repoID, user, repo.EncVersion)
		if err != nil {
			return err
		}
		cryptKey = key
	}

	obj := make(map[string]interface{})
	err := json.Unmarshal([]byte(data), &obj)
	if err != nil {
		err := fmt.Errorf("failed to parse obj data for zip: %v", err)
		return &appError{err, "", http.StatusInternalServerError}
	}

	ar := zip.NewWriter(rsp)
	defer ar.Close()

	if op == "download-dir" || op == "download-dir-link" {
		dirName, ok := obj["dir_name"].(string)
		if !ok || dirName == "" {
			err := fmt.Errorf("invalid download dir data: miss dir_name field")
			return &appError{err, "", http.StatusInternalServerError}
		}

		objID, ok := obj["obj_id"].(string)
		if !ok || objID == "" {
			err := fmt.Errorf("invalid download dir data: miss obj_id field")
			return &appError{err, "", http.StatusInternalServerError}
		}

		zipName := dirName + ".zip"
		setCommonHeaders(rsp, r, "download", zipName)
		// The zip name downloaded by safari will be garbled if we encode the zip name,
		// because we download zip file using chunk encoding.
		contFileName := fmt.Sprintf("attachment;filename=\"%s\";filename*=utf-8''%s", zipName, url.PathEscape(zipName))
		rsp.Header().Set("Content-Disposition", contFileName)
		rsp.Header().Set("Content-Type", "application/octet-stream")

		err := packDir(ar, repo, objID, dirName, cryptKey)
		if err != nil {
			// Headers are already sent; just log and close the stream.
			log.Errorf("failed to pack dir %s: %v", dirName, err)
			return nil
		}
	} else {
		dirList, err := parseDirFilelist(repo, obj)
		if err != nil {
			return &appError{err, "", http.StatusInternalServerError}
		}

		now := time.Now()
		zipName := fmt.Sprintf("documents-export-%d-%d-%d.zip", now.Year(), now.Month(), now.Day())
		setCommonHeaders(rsp, r, "download", zipName)
		// Fix: the RFC 5987 charset token must be "utf-8"; this branch used
		// "utf8", unlike the download-dir branch above.
		contFileName := fmt.Sprintf("attachment;filename=\"%s\";filename*=utf-8''%s", zipName, url.PathEscape(zipName))
		rsp.Header().Set("Content-Disposition", contFileName)
		rsp.Header().Set("Content-Type", "application/octet-stream")

		fileList := []string{}
		for _, v := range dirList {
			// De-duplicate names so zip entries never collide.
			uniqueName := genUniqueFileName(v.Name, fileList)
			fileList = append(fileList, uniqueName)
			if fsmgr.IsDir(v.Mode) {
				if err := packDir(ar, repo, v.ID, uniqueName,
cryptKey); err != nil {
					if !isNetworkErr(err) {
						log.Errorf("failed to pack dir %s: %v", v.Name, err)
					}
					return nil
				}
			} else {
				if err := packFiles(ar, &v, repo, "", uniqueName, cryptKey); err != nil {
					if !isNetworkErr(err) {
						log.Errorf("failed to pack file %s: %v", v.Name, err)
					}
					return nil
				}
			}
		}
	}

	return nil
}

// genUniqueFileName returns fileName, or "name (N)[.ext]" with the smallest
// N >= 1 that does not collide with an entry already present in fileList.
func genUniqueFileName(fileName string, fileList []string) string {
	var uniqueName string
	var name string
	i := 1
	dot := strings.LastIndex(fileName, ".")
	if dot < 0 {
		name = fileName
	} else {
		name = fileName[:dot]
	}
	uniqueName = fileName

	for nameInFileList(uniqueName, fileList) {
		if dot < 0 {
			uniqueName = fmt.Sprintf("%s (%d)", name, i)
		} else {
			uniqueName = fmt.Sprintf("%s (%d).%s", name, i, fileName[dot+1:])
		}
		i++
	}

	return uniqueName
}

// nameInFileList reports whether fileName is already present in fileList.
func nameInFileList(fileName string, fileList []string) bool {
	for _, name := range fileList {
		if name == fileName {
			return true
		}
	}
	return false
}

// parseDirFilelist resolves the "file_list" entries of a download-multi
// token payload into dirents under its "parent_dir". Names containing "/"
// are looked up as relative paths; plain names are matched against the
// parent directory's entries.
func parseDirFilelist(repo *repomgr.Repo, obj map[string]interface{}) ([]fsmgr.SeafDirent, error) {
	parentDir, ok := obj["parent_dir"].(string)
	if !ok || parentDir == "" {
		err := fmt.Errorf("invalid download multi data, miss parent_dir field")
		return nil, err
	}
	dir, err := fsmgr.GetSeafdirByPath(repo.StoreID, repo.RootID, parentDir)
	if err != nil {
		err := fmt.Errorf("failed to get dir %s repo %s", parentDir, repo.StoreID)
		return nil, err
	}

	fileList, ok := obj["file_list"].([]interface{})
	if !ok || fileList == nil {
		err := fmt.Errorf("invalid download multi data, miss file_list field")
		return nil, err
	}

	// Index the parent directory's entries by name for O(1) lookups.
	direntHash := make(map[string]fsmgr.SeafDirent)
	for _, v := range dir.Entries {
		direntHash[v.Name] = *v
	}

	direntList := make([]fsmgr.SeafDirent, 0)

	for _, fileName := range fileList {
		name, ok := fileName.(string)
		if !ok {
			err := fmt.Errorf("invalid download multi data")
			return nil, err
		}
		if name == "" {
			err := fmt.Errorf("invalid download file name")
			return nil, err
		}

		if strings.Contains(name, "/") {
			rpath := filepath.Join(parentDir, name)
			dent, err := fsmgr.GetDirentByPath(repo.StoreID, repo.RootID, rpath)
			if err != nil {
				err := fmt.Errorf("failed to get path %s for repo %s: %v", rpath, repo.StoreID, err)
				return nil, err
			}
			direntList = append(direntList, *dent)
		} else {
			v, ok := direntHash[name]
			if !ok {
				err := fmt.Errorf("invalid download multi data")
				return nil, err
			}
			direntList = append(direntList, v)
		}
	}

	return direntList, nil
}

// packDir recursively adds directory dirID and everything below it to the
// zip archive under dirPath.
func packDir(ar *zip.Writer, repo *repomgr.Repo, dirID, dirPath string, cryptKey *seafileCrypt) error {
	dirent, err := fsmgr.GetSeafdir(repo.StoreID, dirID)
	if err != nil {
		err := fmt.Errorf("failed to get dir for zip: %v", err)
		return err
	}

	if dirent.Entries == nil {
		// Empty directory: emit an explicit directory entry in the zip.
		fileDir := filepath.Join(dirPath)
		fileDir = strings.TrimLeft(fileDir, "/")
		_, err := ar.Create(fileDir + "/")
		if err != nil {
			err := fmt.Errorf("failed to create zip dir: %v", err)
			return err
		}
		return nil
	}

	entries := dirent.Entries
	for _, v := range entries {
		fileDir := filepath.Join(dirPath, v.Name)
		fileDir = strings.TrimLeft(fileDir, "/")
		if fsmgr.IsDir(v.Mode) {
			if err := packDir(ar, repo, v.ID, fileDir, cryptKey); err != nil {
				return err
			}
		} else {
			if err := packFiles(ar, v, repo, dirPath, v.Name, cryptKey); err != nil {
				return err
			}
		}
	}

	return nil
}

// packFiles writes one file's content into the zip archive as
// parentPath/baseName, decrypting blocks when cryptKey is non-nil.
func packFiles(ar *zip.Writer, dirent *fsmgr.SeafDirent, repo *repomgr.Repo, parentPath, baseName string, cryptKey *seafileCrypt) error {
	file, err := fsmgr.GetSeafile(repo.StoreID, dirent.ID)
	if err != nil {
		err := fmt.Errorf("failed to get seafile : %v", err)
		return err
	}

	filePath := filepath.Join(parentPath, baseName)
	filePath = strings.TrimLeft(filePath, "/")

	fileHeader := new(zip.FileHeader)
	fileHeader.Name = filePath
	fileHeader.Modified = time.Unix(dirent.Mtime, 0)
	fileHeader.Method = zip.Deflate
	zipFile, err := ar.CreateHeader(fileHeader)
	if err != nil {
		err := fmt.Errorf("failed to create zip file : %v", err)
		return err
	}

	if cryptKey != nil {
		// Encrypted repo: decrypt each block in memory before writing.
		for _, blkID := range file.BlkIDs {
			var buf bytes.Buffer
			blockmgr.Read(repo.StoreID, blkID, &buf)
			decoded, err :=
cryptKey.decrypt(buf.Bytes())
			if err != nil {
				err := fmt.Errorf("failed to decrypt block %s: %v", blkID, err)
				return err
			}
			_, err = zipFile.Write(decoded)
			if err != nil {
				return err
			}
		}
		return nil
	}

	for _, blkID := range file.BlkIDs {
		err := blockmgr.Read(repo.StoreID, blkID, zipFile)
		if err != nil {
			return err
		}
	}

	return nil
}

// recvData collects the state of one upload/update request: token data, the
// resumable-upload byte range, and the received files.
type recvData struct {
	parentDir   string                  // parent dir recorded in the access token
	tokenType   string                  // op from the token, e.g. "upload" or "upload-link"
	repoID      string
	user        string
	rstart      int64                   // chunked upload: first byte of this chunk (-1 if not chunked)
	rend        int64                   // chunked upload: last byte of this chunk
	fsize       int64                   // chunked upload: total file size
	fileNames   []string                // final file names to commit
	files       []string                // temp-file paths for chunked uploads
	fileHeaders []*multipart.FileHeader // form files for non-chunked uploads
}

// uploadAPICB handles the upload API endpoint.
func uploadAPICB(rsp http.ResponseWriter, r *http.Request) *appError {
	// CORS preflight.
	if r.Method == "OPTIONS" {
		setAccessControl(rsp)
		rsp.WriteHeader(http.StatusOK)
		return nil
	}

	fsm, err := parseUploadHeaders(r)
	if err != nil {
		formatJSONError(rsp, err)
		return err
	}

	if err := doUpload(rsp, r, fsm, false); err != nil {
		formatJSONError(rsp, err)
		return err
	}

	return nil
}

// setAccessControl writes the CORS headers shared by the upload endpoints.
func setAccessControl(rsp http.ResponseWriter) {
	rsp.Header().Set("Access-Control-Allow-Origin", "*")
	rsp.Header().Set("Access-Control-Allow-Headers", "x-requested-with, content-type, content-range, content-disposition, accept, origin, authorization")
	rsp.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, PATCH, DELETE, OPTIONS")
	rsp.Header().Set("Access-Control-Max-Age", "86400")
}

// uploadAjaxCB handles the ajax upload endpoint; identical to uploadAPICB
// except that doUpload is invoked in ajax mode.
func uploadAjaxCB(rsp http.ResponseWriter, r *http.Request) *appError {
	if r.Method == "OPTIONS" {
		setAccessControl(rsp)
		rsp.WriteHeader(http.StatusOK)
		return nil
	}

	fsm, err := parseUploadHeaders(r)
	if err != nil {
		formatJSONError(rsp, err)
		return err
	}

	if err := doUpload(rsp, r, fsm, true); err != nil {
		formatJSONError(rsp, err)
		return err
	}

	return nil
}

// formatJSONError wraps an appError's message in a JSON error body.
// NOTE(review): the message is interpolated without JSON escaping; a message
// containing a double quote would produce invalid JSON — looks like all
// callers pass fixed strings, confirm before relying on it.
func formatJSONError(rsp http.ResponseWriter, err *appError) {
	if err.Message != "" {
		rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
		err.Message = fmt.Sprintf("{\"error\": \"%s\"}", err.Message)
	}
}

// normalizeUTF8Path converts p to Unicode NFC form so path comparisons are
// consistent regardless of the client's normalization form.
func normalizeUTF8Path(p string) string {
	newPath := norm.NFC.Bytes([]byte(p))
	return string(newPath)
}

// doUpload receives one upload request (possibly one chunk of a resumable
// upload), stages the data, validates parent dir/quota, and commits the
// files into the repo.
func doUpload(rsp http.ResponseWriter, r *http.Request, fsm *recvData, isAjax bool) *appError {
	setAccessControl(rsp)

	// 1MB in-memory limit; larger multipart parts spill to temp files.
	if err := r.ParseMultipartForm(1 << 20); err != nil {
		return &appError{nil, "", http.StatusBadRequest}
	}
	defer r.MultipartForm.RemoveAll()

	repoID := fsm.repoID
	user := fsm.user

	replaceStr := r.FormValue("replace")
	var replaceExisted bool
	if replaceStr != "" {
		replace, err := strconv.ParseInt(replaceStr, 10, 64)
		if err != nil || (replace != 0 && replace != 1) {
			msg := "Invalid argument replace.\n"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		if replace == 1 {
			replaceExisted = true
		}
	}

	parentDir := normalizeUTF8Path(r.FormValue("parent_dir"))
	if parentDir == "" {
		msg := "No parent_dir given.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}

	// Optional client-supplied mtime; silently ignored when unparsable.
	lastModifyStr := normalizeUTF8Path(r.FormValue("last_modify"))
	var lastModify int64
	if lastModifyStr != "" {
		t, err := time.Parse(time.RFC3339, lastModifyStr)
		if err == nil {
			lastModify = t.Unix()
		}
	}

	relativePath := normalizeUTF8Path(r.FormValue("relative_path"))
	if relativePath != "" {
		if relativePath[0] == '/' || relativePath[0] == '\\' {
			msg := "Invalid relative path"
			return &appError{nil, msg, http.StatusBadRequest}
		}
	}

	newParentDir := filepath.Join("/", parentDir, relativePath)

	defer clearTmpFile(fsm, newParentDir)

	if fsm.rstart >= 0 {
		// Chunked (resumable) upload: append this chunk to a temp file.
		if parentDir[0] != '/' {
			msg := "Invalid parent dir"
			return &appError{nil, msg, http.StatusBadRequest}
		}

		formFiles := r.MultipartForm.File
		files, ok := formFiles["file"]
		if !ok {
			msg := "No file in multipart form.\n"
			return &appError{nil, msg, http.StatusBadRequest}
		}

		if len(files) > 1 {
			msg := "More files in one request"
			return &appError{nil, msg, http.StatusBadRequest}
		}

		err := writeBlockDataToTmpFile(r, fsm, formFiles, repoID, newParentDir)
		if err != nil {
			msg := "Internal error.\n"
			err := fmt.Errorf("failed to write block data to tmp file: %v", err)
			return &appError{err, msg, http.StatusInternalServerError}
		}

		// Not the final chunk yet: acknowledge and wait for more.
		if fsm.rend != fsm.fsize-1 {
			rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
			success := "{\"success\": true}"
			rsp.Write([]byte(success))
			return nil
		}
	} else {
		formFiles := r.MultipartForm.File
		fileHeaders, ok := formFiles["file"]
		if !ok {
			msg := "No file in multipart form.\n"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		for _, handler := range fileHeaders {
			fileName := filepath.Base(handler.Filename)
			fsm.fileNames = append(fsm.fileNames, normalizeUTF8Path(fileName))
			fsm.fileHeaders = append(fsm.fileHeaders, handler)
		}
	}

	if fsm.fileNames == nil {
		msg := "No file uploaded.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}

	if err := checkParentDir(repoID, parentDir); err != nil {
		return err
	}

	if !isParentMatched(fsm.parentDir, parentDir) {
		msg := "Parent dir doesn't match."
		return &appError{nil, msg, http.StatusForbidden}
	}

	if err := checkTmpFileList(fsm); err != nil {
		return err
	}

	// Determine the byte count for the post-receive quota check; -1 means
	// unknown (no Content-Length header).
	var contentLen int64
	if fsm.fsize > 0 {
		contentLen = fsm.fsize
	} else {
		lenstr := r.Header.Get("Content-Length")
		if lenstr == "" {
			contentLen = -1
		} else {
			tmpLen, err := strconv.ParseInt(lenstr, 10, 64)
			if err != nil {
				msg := "Internal error.\n"
				err := fmt.Errorf("failed to parse content len: %v", err)
				return &appError{err, msg, http.StatusInternalServerError}
			}
			contentLen = tmpLen
		}
	}

	ret, err := checkQuota(repoID, contentLen)
	if err != nil {
		msg := "Internal error.\n"
		err := fmt.Errorf("failed to check quota: %v", err)
		return &appError{err, msg, http.StatusInternalServerError}
	}
	if ret == 1 {
		msg := "Out of quota.\n"
		return &appError{nil, msg, seafHTTPResNoQuota}
	}

	if err := createRelativePath(repoID, parentDir, relativePath, user); err != nil {
		return err
	}

	if err := postMultiFiles(rsp, r, repoID, newParentDir, user, fsm, replaceExisted, lastModify, isAjax); err != nil {
		return err
	}

	oper := "web-file-upload"
	if fsm.tokenType == "upload-link" {
		oper = "link-file-upload"
	}
	sendStatisticMsg(repoID, user, oper, uint64(contentLen))

	return nil
}

// writeBlockDataToTmpFile appends the current chunk of a resumable upload to
// a shared temp file at the offset given by the Content-Range header.
func writeBlockDataToTmpFile(r *http.Request, fsm *recvData, formFiles map[string][]*multipart.FileHeader,
	repoID, parentDir string) error {
	httpTempDir := filepath.Join(absDataDir, "httptemp")

	fileHeaders, ok := formFiles["file"]
	if !ok {
		err := fmt.Errorf("failed to get file from multipart form")
		return err
	}

	filename, err := getFileNameFromMimeHeader(r)
	if err != nil {
		return fmt.Errorf("failed to get filename from mime header: %w", err)
	}

	handler := fileHeaders[0]
	file, err := handler.Open()
	if err != nil {
		err := fmt.Errorf("failed to open file for read: %v", err)
		return err
	}
	defer file.Close()

	var f *os.File

	// One temp file per (repo, target path); created on the first chunk and
	// reopened on subsequent chunks.
	filePath := filepath.Join("/", parentDir, filename)
	tmpFile, err := repomgr.GetUploadTmpFile(repoID, filePath)
	if err != nil || tmpFile == "" {
		tmpDir := filepath.Join(httpTempDir, "cluster-shared")
		f, err = os.CreateTemp(tmpDir, filename)
		if err != nil {
			return err
		}
		repomgr.AddUploadTmpFile(repoID, filePath, f.Name())
		tmpFile = f.Name()
	} else {
		f, err = os.OpenFile(tmpFile, os.O_WRONLY|os.O_CREATE, 0666)
		if err != nil {
			return err
		}
	}

	if fsm.rend == fsm.fsize-1 {
		// Final chunk: record the name and temp path for the commit step.
		fsm.fileNames = append(fsm.fileNames, filepath.Base(filename))
		fsm.files = append(fsm.files, tmpFile)
	}

	// NOTE(review): Seek/Copy errors are ignored here; a short write would
	// only surface later as a corrupted upload — consider checking them.
	f.Seek(fsm.rstart, 0)
	io.Copy(f, file)
	f.Close()

	return nil
}

// getFileNameFromMimeHeader extracts and URL-unescapes the filename
// parameter of the request's Content-Disposition header.
func getFileNameFromMimeHeader(r *http.Request) (string, error) {
	disposition := r.Header.Get("Content-Disposition")
	if disposition == "" {
		err := fmt.Errorf("missing content disposition")
		return "", err
	}
	_, params, err := mime.ParseMediaType(disposition)
	if err != nil {
		err := fmt.Errorf("failed to parse Content-Disposition: %v", err)
		return "", err
	}
	filename, err := url.QueryUnescape(params["filename"])
	if err != nil {
		err := fmt.Errorf("failed to get filename: %v", err)
		return "", err
	}

	return normalizeUTF8Path(filename), nil
}

// createRelativePath creates relativePath (and any missing parents) under
// parentDir; a no-op when relativePath is empty.
func createRelativePath(repoID, parentDir, relativePath, user string) *appError {
	if relativePath == "" {
		return nil
	}

	err := mkdirWithParents(repoID, parentDir, relativePath, user)
	if err != nil {
		msg := "Internal error.\n"
		err := fmt.Errorf("Failed to create parent directory: %v", err)
		return &appError{err, msg, http.StatusInternalServerError}
	}

	return nil
}

// mkdirWithParents creates newDirPath under parentDir in repoID, generating
// any missing intermediate directories and committing the change.
func mkdirWithParents(repoID, parentDir, newDirPath, user string) error {
	repo := repomgr.Get(repoID)
	if repo == nil {
		err := fmt.Errorf("failed to get repo %s", repoID)
		return err
	}

	headCommit, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
	if err != nil {
		err := fmt.Errorf("failed to get head commit for repo %s", repo.ID)
		return err
	}

	relativeDirCan := getCanonPath(newDirPath)

	// Reject reserved/ignored names anywhere in the path.
	subFolders := strings.Split(relativeDirCan, "/")
	for _, name := range subFolders {
		if name == "" {
			continue
		}
		if shouldIgnoreFile(name) {
			err := fmt.Errorf("invalid dir name %s", name)
			return err
		}
	}

	var rootID string
	var parentDirCan string
	if parentDir == "/" || parentDir == "\\" {
		parentDirCan = "/"
	} else {
		parentDirCan = getCanonPath(parentDir)
	}

	absPath, dirID, err := checkAndCreateDir(repo, headCommit.RootID, parentDirCan, subFolders)
	if err != nil {
		err := fmt.Errorf("failed to check and create dir: %v", err)
		return err
	}
	// Empty absPath means every component already exists; nothing to commit.
	if absPath == "" {
		return nil
	}
	newRootID := headCommit.RootID
	mtime := time.Now().Unix()
	mode := (syscall.S_IFDIR | 0644)
	dent := fsmgr.NewDirent(dirID, filepath.Base(absPath), uint32(mode), mtime, "", 0)

	var names []string
	rootID, _ = doPostMultiFiles(repo, newRootID, filepath.Dir(absPath), []*fsmgr.SeafDirent{dent}, user, false, &names)
	if rootID == "" {
		err := fmt.Errorf("failed to put dir")
		return err
	}

	buf := fmt.Sprintf("Added directory \"%s\"", relativeDirCan)
	_, err = genNewCommit(repo, headCommit, rootID, user, buf, true, "", false)
	if err != nil {
		err := fmt.Errorf("failed to generate new commit: %v", err)
		return err
	}

	// Propagate to virtual repos asynchronously.
	go mergeVirtualRepoPool.AddTask(repo.ID, "")

	return nil
}

// checkAndCreateDir walks subFolders under parentDir and returns the absolute
// path of the first missing component plus the object ID of the directory
// chain built for the remaining components ("" when nothing is missing).
func checkAndCreateDir(repo *repomgr.Repo, rootID, parentDir string, subFolders []string) (string, string, error) {
	storeID := repo.StoreID
	dir, err := fsmgr.GetSeafdirByPath(storeID, rootID, parentDir)
	if err != nil {
		err := fmt.Errorf("parent_dir %s doesn't exist in repo %s",
parentDir, storeID)
		return "", "", err
	}

	// Descend through existing components until one is missing.
	entries := dir.Entries
	var exists bool
	var absPath string
	var dirList []string
	for i, dirName := range subFolders {
		for _, de := range entries {
			if de.Name == dirName {
				exists = true
				subDir, err := fsmgr.GetSeafdir(storeID, de.ID)
				if err != nil {
					err := fmt.Errorf("failed to get seaf dir: %v", err)
					return "", "", err
				}
				entries = subDir.Entries
				break
			}
		}
		if !exists {
			relativePath := filepath.Join(subFolders[:i+1]...)
			absPath = filepath.Join(parentDir, relativePath)
			dirList = subFolders[i:]
			break
		}
		exists = false
	}
	// The first missing component is created by the caller via its dirent;
	// only the components below it need a directory chain here.
	if dirList != nil {
		dirList = dirList[1:]
	}
	if len(dirList) == 0 {
		return absPath, "", nil
	}

	dirID, err := genDirRecursive(repo, dirList)
	if err != nil {
		err := fmt.Errorf("failed to generate dir recursive: %v", err)
		return "", "", err
	}

	return absPath, dirID, nil
}

// genDirRecursive builds, bottom-up, a chain of nested directories for the
// components in toPath and returns the object ID of the topmost one.
func genDirRecursive(repo *repomgr.Repo, toPath []string) (string, error) {
	if len(toPath) == 1 {
		// Deepest component: an empty directory entry.
		uniqueName := toPath[0]
		mode := (syscall.S_IFDIR | 0644)
		mtime := time.Now().Unix()
		dent := fsmgr.NewDirent("", uniqueName, uint32(mode), mtime, "", 0)
		newdir, err := fsmgr.NewSeafdir(1, []*fsmgr.SeafDirent{dent})
		if err != nil {
			err := fmt.Errorf("failed to new seafdir: %v", err)
			return "", err
		}
		err = fsmgr.SaveSeafdir(repo.StoreID, newdir)
		if err != nil {
			err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID)
			return "", err
		}
		return newdir.DirID, nil
	}

	ret, err := genDirRecursive(repo, toPath[1:])
	if err != nil {
		err := fmt.Errorf("failed to generate dir recursive: %v", err)
		return "", err
	}
	if ret != "" {
		// Wrap the already-built subtree in a directory for this component.
		uniqueName := toPath[0]
		mode := (syscall.S_IFDIR | 0644)
		mtime := time.Now().Unix()
		dent := fsmgr.NewDirent(ret, uniqueName, uint32(mode), mtime, "", 0)
		newdir, err := fsmgr.NewSeafdir(1, []*fsmgr.SeafDirent{dent})
		if err != nil {
			err := fmt.Errorf("failed to new seafdir: %v", err)
			return "", err
		}
		err = fsmgr.SaveSeafdir(repo.StoreID, newdir)
		if err != nil {
			err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID)
			return "", err
		}
		ret = newdir.DirID
	}

	return ret, nil
}

// clearTmpFile removes the chunked-upload temp file once the final chunk of
// a resumable upload has been received (rend == fsize-1).
func clearTmpFile(fsm *recvData, parentDir string) {
	if fsm.rstart >= 0 && fsm.rend == fsm.fsize-1 {
		filePath := filepath.Join("/", parentDir, fsm.fileNames[0])
		tmpFile, err := repomgr.GetUploadTmpFile(fsm.repoID, filePath)
		if err == nil && tmpFile != "" {
			os.Remove(tmpFile)
		}
		repomgr.DelUploadTmpFile(fsm.repoID, filePath)
	}
}

// parseUploadHeaders validates the upload URL and access token, checks that
// the repo is writable, and pre-checks quota/size limits before the request
// body is consumed.
func parseUploadHeaders(r *http.Request) (*recvData, *appError) {
	// Web-access tokens are fixed-length (36 chars).
	tokenLen := 36
	parts := strings.Split(r.URL.Path[1:], "/")
	if len(parts) < 2 {
		msg := "Invalid URL"
		return nil, &appError{nil, msg, http.StatusBadRequest}
	}
	urlOp := parts[0]
	if len(parts[1]) < tokenLen {
		msg := "Invalid URL"
		return nil, &appError{nil, msg, http.StatusBadRequest}
	}
	token := parts[1][:tokenLen]

	accessInfo, appErr := parseWebaccessInfo(token)
	if appErr != nil {
		return nil, appErr
	}

	repoID := accessInfo.repoID
	op := accessInfo.op
	user := accessInfo.user
	id := accessInfo.objID

	status, err := repomgr.GetRepoStatus(repoID)
	if err != nil {
		return nil, &appError{err, "", http.StatusInternalServerError}
	}
	// -1 means the status is unset; anything else non-normal is read-only.
	if status != repomgr.RepoStatusNormal && status != -1 {
		msg := "Repo status not writable."
		return nil, &appError{nil, msg, http.StatusBadRequest}
	}

	if op == "upload-link" {
		op = "upload"
	}
	// The URL's operation prefix must match the token's operation.
	if strings.Index(urlOp, op) != 0 {
		msg := "Operation does not match access token."
return nil, &appError{nil, msg, http.StatusForbidden} } fsm := new(recvData) if op != "update" { obj := make(map[string]interface{}) if err := json.Unmarshal([]byte(id), &obj); err != nil { err := fmt.Errorf("failed to decode obj data : %v", err) return nil, &appError{err, "", http.StatusInternalServerError} } parentDir, ok := obj["parent_dir"].(string) if !ok || parentDir == "" { err := fmt.Errorf("no parent_dir in access token") return nil, &appError{err, "", http.StatusInternalServerError} } fsm.parentDir = parentDir } fsm.tokenType = accessInfo.op fsm.repoID = repoID fsm.user = user fsm.rstart = -1 fsm.rend = -1 fsm.fsize = -1 ranges := r.Header.Get("Content-Range") if ranges != "" { parseContentRange(ranges, fsm) } var contentLen int64 lenstr := r.Header.Get("Content-Length") if lenstr != "" { conLen, _ := strconv.ParseInt(lenstr, 10, 64) contentLen = conLen if contentLen < 0 { contentLen = 0 } if fsm.fsize > 0 { contentLen = fsm.fsize } } if err := checkQuotaByContentLength(r, repoID, contentLen); err != nil { return nil, err } if err := checkFileSizeByContentLength(r, contentLen); err != nil { return nil, err } return fsm, nil } // Check whether the file to be uploaded would exceed the quota before receiving the body, in order to avoid unnecessarily receiving the body. // After receiving the body, the quota is checked again to handle cases where the Content-Length in the request header is missing, which could make the initial quota check inaccurate. 
// checkQuotaByContentLength rejects a PUT/POST early when contentLen would
// push the repo over quota. Non-body methods (GET/OPTIONS/...) pass through.
func checkQuotaByContentLength(r *http.Request, repoID string, contentLen int64) *appError {
	if r.Method != "PUT" && r.Method != "POST" {
		return nil
	}
	ret, err := checkQuota(repoID, contentLen)
	if err != nil {
		msg := "Internal error.\n"
		err := fmt.Errorf("failed to check quota: %v", err)
		return &appError{err, msg, http.StatusInternalServerError}
	}
	if ret == 1 {
		msg := "Out of quota.\n"
		return &appError{nil, msg, seafHTTPResNoQuota}
	}
	return nil
}

// checkFileSizeByContentLength rejects a PUT/POST whose declared length
// exceeds the configured MaxUploadSize (0 disables the limit).
func checkFileSizeByContentLength(r *http.Request, contentLen int64) *appError {
	if r.Method != "PUT" && r.Method != "POST" {
		return nil
	}
	if option.MaxUploadSize > 0 && uint64(contentLen) > option.MaxUploadSize {
		msg := "File size is too large.\n"
		return &appError{nil, msg, seafHTTPResTooLarge}
	}
	return nil
}

// postMultiFiles indexes the uploaded files into blocks and commits them
// under parentDir. Chunked uploads (rstart >= 0) are indexed from temp
// files on disk; direct uploads from the multipart file headers. The
// response is JSON when "ret-json" is requested or isAjax, otherwise a
// tab-separated list of file IDs.
func postMultiFiles(rsp http.ResponseWriter, r *http.Request, repoID, parentDir, user string, fsm *recvData, replace bool, lastModify int64, isAjax bool) *appError {
	fileNames := fsm.fileNames
	files := fsm.files

	repo := repomgr.Get(repoID)
	if repo == nil {
		msg := "Failed to get repo.\n"
		err := fmt.Errorf("Failed to get repo %s", repoID)
		return &appError{err, msg, http.StatusInternalServerError}
	}

	canonPath := getCanonPath(parentDir)

	// Refuse when any name would exhaust the "name (N)" dedup counter.
	if !replace && checkFilesWithSameName(repo, canonPath, fileNames) {
		msg := "Too many files with same name.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	for _, fileName := range fileNames {
		if shouldIgnoreFile(fileName) {
			msg := fmt.Sprintf("invalid fileName: %s.\n", fileName)
			return &appError{nil, msg, http.StatusBadRequest}
		}
	}
	if strings.Contains(parentDir, "//") {
		msg := "parent_dir contains // sequence.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}

	var cryptKey *seafileCrypt
	if repo.IsEncrypted {
		key, err := parseCryptKey(rsp, repoID, user, repo.EncVersion)
		if err != nil {
			return err
		}
		cryptKey = key
	}

	// Snapshot the GC id before indexing so the final commit can detect
	// a concurrent GC run (see postFilesAndGenCommit/updateBranch).
	gcID, err := repomgr.GetCurrentGCID(repo.StoreID)
	if err != nil {
		err := fmt.Errorf("failed to get current gc id for repo %s: %v", repoID, err)
		return &appError{err, "", http.StatusInternalServerError}
	}

	var ids []string
	var sizes []int64
	if fsm.rstart >= 0 {
		// Chunked upload: index the assembled temp files.
		for _, filePath := range files {
			id, size, err := indexBlocks(r.Context(), repo.StoreID, repo.Version, filePath, nil, cryptKey)
			if err != nil {
				if !errors.Is(err, context.Canceled) {
					err := fmt.Errorf("failed to index blocks: %v", err)
					return &appError{err, "", http.StatusInternalServerError}
				}
				// Client cancelled; don't log a spurious error.
				return &appError{nil, "", http.StatusInternalServerError}
			}
			ids = append(ids, id)
			sizes = append(sizes, size)
		}
	} else {
		// Direct upload: index straight from the multipart parts.
		for _, handler := range fsm.fileHeaders {
			id, size, err := indexBlocks(r.Context(), repo.StoreID, repo.Version, "", handler, cryptKey)
			if err != nil {
				if !errors.Is(err, context.Canceled) {
					err := fmt.Errorf("failed to index blocks: %v", err)
					return &appError{err, "", http.StatusInternalServerError}
				}
				return &appError{nil, "", http.StatusInternalServerError}
			}
			ids = append(ids, id)
			sizes = append(sizes, size)
		}
	}

	retStr, err := postFilesAndGenCommit(fileNames, repo.ID, user, canonPath, replace, ids, sizes, lastModify, gcID)
	if err != nil {
		if errors.Is(err, ErrGCConflict) {
			return &appError{nil, "GC Conflict.\n", http.StatusConflict}
		} else {
			err := fmt.Errorf("failed to post files and gen commit: %v", err)
			return &appError{err, "", http.StatusInternalServerError}
		}
	}

	_, ok := r.Form["ret-json"]
	if ok || isAjax {
		rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
		rsp.Write([]byte(retStr))
	} else {
		// Legacy plain-text response: tab-joined file IDs.
		var array []map[string]interface{}
		err := json.Unmarshal([]byte(retStr), &array)
		if err != nil {
			msg := "Internal error.\n"
			err := fmt.Errorf("failed to decode data to json: %v", err)
			return &appError{err, msg, http.StatusInternalServerError}
		}
		var ids []string
		for _, v := range array {
			id, ok := v["id"].(string)
			if !ok {
				msg := "Internal error.\n"
				err := fmt.Errorf("failed to assert")
				return &appError{err, msg, http.StatusInternalServerError}
			}
			ids = append(ids, id)
		}
		newIDs := strings.Join(ids, "\t")
		rsp.Write([]byte(newIDs))
	}
	return nil
}

// checkFilesWithSameName reports true when any of fileNames cannot be given
// a unique "name (N)" variant in canonPath (genUniqueName returned "").
// Lookup failures are treated as "no conflict".
func checkFilesWithSameName(repo *repomgr.Repo, canonPath string, fileNames []string) bool {
	commit, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
	if err != nil {
		return false
	}
	dir, err := fsmgr.GetSeafdirByPath(repo.StoreID, commit.RootID, canonPath)
	if err != nil {
		return false
	}
	for _, name := range fileNames {
		uniqueName := genUniqueName(name, dir.Entries)
		if uniqueName == "" {
			return true
		}
	}
	return false
}

// postFilesAndGenCommit writes the new dirents into the tree at canonPath
// and creates a commit. On a concurrent-update conflict (only possible when
// replace is false, since concurrent handling is then disabled) it reloads
// the head and retries after a short random sleep.
func postFilesAndGenCommit(fileNames []string, repoID string, user, canonPath string, replace bool, ids []string, sizes []int64, lastModify int64, lastGCID string) (string, error) {
	// Concurrent updates are only auto-merged in replace mode; otherwise
	// a conflict surfaces as ErrConflict and triggers the retry below.
	handleConncurrentUpdate := true
	if !replace {
		handleConncurrentUpdate = false
	}
	repo := repomgr.Get(repoID)
	if repo == nil {
		err := fmt.Errorf("failed to get repo %s", repoID)
		return "", err
	}
	headCommit, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
	if err != nil {
		err := fmt.Errorf("failed to get head commit for repo %s", repo.ID)
		return "", err
	}
	var names []string
	var retryCnt int

	var dents []*fsmgr.SeafDirent
	for i, name := range fileNames {
		// Guard against ids/sizes being shorter than fileNames.
		if i > len(ids)-1 || i > len(sizes)-1 {
			break
		}
		mode := (syscall.S_IFREG | 0644)
		mtime := lastModify
		if mtime <= 0 {
			mtime = time.Now().Unix()
		}
		dent := fsmgr.NewDirent(ids[i], name, uint32(mode), mtime, "", sizes[i])
		dents = append(dents, dent)
	}

retry:
	rootID, err := doPostMultiFiles(repo, headCommit.RootID, canonPath, dents, user, replace, &names)
	if err != nil {
		err := fmt.Errorf("failed to post files to %s in repo %s", canonPath, repo.ID)
		return "", err
	}

	var buf string
	if len(fileNames) > 1 {
		buf = fmt.Sprintf("Added \"%s\" and %d more files.", fileNames[0], len(fileNames)-1)
	} else {
		buf = fmt.Sprintf("Added \"%s\".", fileNames[0])
	}

	_, err = genNewCommit(repo, headCommit, rootID, user, buf, handleConncurrentUpdate, lastGCID, true)
	if err != nil {
		if err != ErrConflict {
			err := fmt.Errorf("failed to generate new commit: %w", err)
			return "", err
		}
		retryCnt++
		/* Sleep random time between 0 and 3 seconds. */
		random := rand.Intn(30) + 1
		log.Debugf("concurrent upload retry :%d", retryCnt)
		time.Sleep(time.Duration(random*100) * time.Millisecond)
		repo = repomgr.Get(repoID)
		if repo == nil {
			err := fmt.Errorf("failed to get repo %s", repoID)
			return "", err
		}
		headCommit, err = commitmgr.Load(repo.ID, repo.HeadCommitID)
		if err != nil {
			err := fmt.Errorf("failed to get head commit for repo %s", repo.ID)
			return "", err
		}
		goto retry
	}

	go mergeVirtualRepoPool.AddTask(repo.ID, "")

	retJSON, err := formatJSONRet(names, ids, sizes)
	if err != nil {
		err := fmt.Errorf("failed to format json data")
		return "", err
	}
	return string(retJSON), nil
}

// formatJSONRet renders parallel name/id/size slices as a JSON array of
// {name, id, size} objects, truncating at the shortest slice.
func formatJSONRet(nameList, idList []string, sizeList []int64) ([]byte, error) {
	var array []map[string]interface{}
	for i := range nameList {
		if i >= len(idList) || i >= len(sizeList) {
			break
		}
		obj := make(map[string]interface{})
		obj["name"] = nameList[i]
		obj["id"] = idList[i]
		obj["size"] = sizeList[i]
		array = append(array, obj)
	}
	jsonstr, err := json.Marshal(array)
	if err != nil {
		err := fmt.Errorf("failed to convert array to json")
		return nil, err
	}
	return jsonstr, nil
}

// getCanonPath normalizes backslashes to slashes and cleans the path.
func getCanonPath(p string) string {
	formatPath := strings.Replace(p, "\\", "/", -1)
	return filepath.Join(formatPath)
}

var (
	// Note: "Concurent" typo is preserved — the string is matched nowhere,
	// but changing a runtime message is out of scope for a doc pass.
	ErrConflict   = errors.New("Concurent upload conflict")
	ErrGCConflict = errors.New("GC Conflict")
)

// genNewCommit saves a commit with newRoot on top of base and advances the
// branch head, retrying up to maxRetryCnt times when another writer moved
// the head concurrently (genCommitNeedRetry reports retry=true).
func genNewCommit(repo *repomgr.Repo, base *commitmgr.Commit, newRoot, user, desc string, handleConncurrentUpdate bool, lastGCID string, checkGC bool) (string, error) {
	var retryCnt int
	repoID := repo.ID
	commit := commitmgr.NewCommit(repoID, base.CommitID, newRoot, user, desc)
	repomgr.RepoToCommit(repo, commit)
	err := commitmgr.Save(commit)
	if err != nil {
		err := fmt.Errorf("failed to add commit: %v", err)
		return "", err
	}
	var commitID string

	maxRetryCnt := 10

	for {
		retry, err := genCommitNeedRetry(repo, base, commit, newRoot, user, handleConncurrentUpdate, &commitID, lastGCID, checkGC)
		if err != nil {
			return "", err
		}
		if !retry {
			break
		}
		if
!handleConncurrentUpdate {
			return "", ErrConflict
		}
		if retryCnt < maxRetryCnt {
			/* Sleep random time between 0 and 3 seconds. */
			random := rand.Intn(30) + 1
			time.Sleep(time.Duration(random*100) * time.Millisecond)
			// Reload the repo so the next attempt sees the new head.
			repo = repomgr.Get(repoID)
			if repo == nil {
				err := fmt.Errorf("repo %s doesn't exist", repoID)
				return "", err
			}
			retryCnt++
		} else {
			err := fmt.Errorf("stop updating repo %s after %d retries", repoID, maxRetryCnt)
			return "", err
		}
	}

	return commitID, nil
}

// fastForwardOrMerge advances the repo head to newCommit, merging with any
// concurrent head movement, retrying up to 3 times. When the sync token has
// a recorded last-GC id, that id is consumed and used for GC-conflict
// detection in updateBranch.
func fastForwardOrMerge(user, token string, repo *repomgr.Repo, base, newCommit *commitmgr.Commit) error {
	var retryCnt int
	checkGC, err := repomgr.HasLastGCID(repo.ID, token)
	if err != nil {
		return err
	}
	var lastGCID string
	if checkGC {
		lastGCID, _ = repomgr.GetLastGCID(repo.ID, token)
		repomgr.RemoveLastGCID(repo.ID, token)
	}
	for {
		retry, err := genCommitNeedRetry(repo, base, newCommit, newCommit.RootID, user, true, nil, lastGCID, checkGC)
		if err != nil {
			return err
		}
		if !retry {
			break
		}
		if retryCnt < 3 {
			random := rand.Intn(10) + 1
			time.Sleep(time.Duration(random*100) * time.Millisecond)
			retryCnt++
		} else {
			err = fmt.Errorf("stop updating repo %s after 3 retries", repo.ID)
			return err
		}
	}
	return nil
}

// genCommitNeedRetry tries to move the branch head to commit. If the head
// moved since base, it three-way merges (base/currentHead/newRoot) and
// commits the merge result instead. Returns (true, nil) when the branch
// update lost a race and the caller should retry; on success writes the
// resulting commit id through commitID (when non-nil).
func genCommitNeedRetry(repo *repomgr.Repo, base *commitmgr.Commit, commit *commitmgr.Commit, newRoot, user string, handleConncurrentUpdate bool, commitID *string, lastGCID string, checkGC bool) (bool, error) {
	var secondParentID string
	repoID := repo.ID
	var mergeDesc string
	var mergedCommit *commitmgr.Commit
	currentHead, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
	if err != nil {
		err := fmt.Errorf("failed to get head commit for repo %s", repoID)
		return false, err
	}

	if base.CommitID != currentHead.CommitID {
		// Someone else committed since we loaded base.
		if !handleConncurrentUpdate {
			return false, ErrConflict
		}
		roots := []string{base.RootID, currentHead.RootID, newRoot}
		opt := new(mergeOptions)
		opt.remoteRepoID = repoID
		opt.remoteHead = commit.CommitID

		err := mergeTrees(repo.StoreID, roots, opt)
		if err != nil {
			err := fmt.Errorf("failed to merge")
			return false, err
		}

		if !opt.conflict {
			mergeDesc = "Auto merge by system"
		} else {
			mergeDesc = genMergeDesc(repo, opt.mergedRoot, currentHead.RootID, newRoot)
			if mergeDesc == "" {
				mergeDesc = "Auto merge by system"
			}
		}

		// The merge commit has currentHead as first parent and our
		// commit as second parent.
		secondParentID = commit.CommitID
		mergedCommit = commitmgr.NewCommit(repoID, currentHead.CommitID, opt.mergedRoot, user, mergeDesc)
		repomgr.RepoToCommit(repo, mergedCommit)
		mergedCommit.SecondParentID.SetValid(commit.CommitID)
		mergedCommit.NewMerge = 1
		if opt.conflict {
			mergedCommit.Conflict = 1
		}
		err = commitmgr.Save(mergedCommit)
		if err != nil {
			err := fmt.Errorf("failed to add commit: %v", err)
			return false, err
		}
	} else {
		mergedCommit = commit
	}

	gcConflict, err := updateBranch(repoID, repo.StoreID, mergedCommit.CommitID, currentHead.CommitID, secondParentID, checkGC, lastGCID)
	if gcConflict {
		// GC conflict is fatal, not retryable.
		return false, err
	}
	if err != nil {
		// Head moved again between our check and the DB update; retry.
		return true, nil
	}

	if commitID != nil {
		*commitID = mergedCommit.CommitID
	}
	return false, nil
}

// genMergeDesc builds a human-readable description of a merge by diffing the
// merged root against both parents; empty string on failure.
func genMergeDesc(repo *repomgr.Repo, mergedRoot, p1Root, p2Root string) string {
	var results []*diff.DiffEntry
	err := diff.DiffMergeRoots(repo.StoreID, mergedRoot, p1Root, p2Root, &results, true)
	if err != nil {
		return ""
	}
	desc := diff.DiffResultsToDesc(results)
	return desc
}

// updateBranch atomically advances the "master" branch of repoID from
// oldCommitID to newCommitID inside a DB transaction, using SELECT ... FOR
// UPDATE row locks. When checkGC is set, it first compares lastGCID with
// the repo's current gc_id and returns (true, ErrGCConflict) on mismatch.
// A plain head-moved race returns gcConflict=false with a non-nil error so
// the caller retries.
func updateBranch(repoID, originRepoID, newCommitID, oldCommitID, secondParentID string, checkGC bool, lastGCID string) (gcConflict bool, err error) {
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	trans, err := seafileDB.BeginTx(ctx, nil)
	if err != nil {
		err := fmt.Errorf("failed to start transaction: %v", err)
		return false, err
	}

	var row *sql.Row
	var sqlStr string
	if checkGC {
		// For virtual repos the GC id lives under the origin repo.
		sqlStr = "SELECT gc_id FROM GCID WHERE repo_id = ? FOR UPDATE"
		if originRepoID == "" {
			row = trans.QueryRowContext(ctx, sqlStr, repoID)
		} else {
			row = trans.QueryRowContext(ctx, sqlStr, originRepoID)
		}

		var gcID sql.NullString
		if err := row.Scan(&gcID); err != nil {
			if err != sql.ErrNoRows {
				trans.Rollback()
				return false, err
			}
		}
		if lastGCID != gcID.String {
			err = fmt.Errorf("Head branch update for repo %s conflicts with GC.", repoID)
			trans.Rollback()
			return true, ErrGCConflict
		}
	}

	var commitID string
	name := "master"
	sqlStr = "SELECT commit_id FROM Branch WHERE name = ? AND repo_id = ? FOR UPDATE"

	row = trans.QueryRowContext(ctx, sqlStr, name, repoID)
	if err := row.Scan(&commitID); err != nil {
		if err != sql.ErrNoRows {
			trans.Rollback()
			return false, err
		}
	}
	if oldCommitID != commitID {
		trans.Rollback()
		err := fmt.Errorf("head commit id has changed")
		return false, err
	}

	sqlStr = "UPDATE Branch SET commit_id = ? WHERE name = ? AND repo_id = ?"
	_, err = trans.ExecContext(ctx, sqlStr, newCommitID, name, repoID)
	if err != nil {
		trans.Rollback()
		return false, err
	}

	trans.Commit()

	// Post-update hooks run outside the transaction.
	if secondParentID != "" {
		if err := onBranchUpdated(repoID, secondParentID, false); err != nil {
			return false, err
		}
	}

	if err := onBranchUpdated(repoID, newCommitID, true); err != nil {
		return false, err
	}

	return false, nil
}

// onBranchUpdated propagates a head change: refreshes cached repo info,
// optionally notifies the notification server, and publishes an update
// event (skipped for virtual repos).
func onBranchUpdated(repoID string, commitID string, updateRepoInfo bool) error {
	if updateRepoInfo {
		if err := repomgr.UpdateRepoInfo(repoID, commitID); err != nil {
			return err
		}
	}
	if option.EnableNotification {
		notifRepoUpdate(repoID, commitID)
	}

	isVirtual, err := repomgr.IsVirtualRepo(repoID)
	if err != nil {
		return err
	}
	if isVirtual {
		return nil
	}
	publishUpdateEvent(repoID, commitID)
	return nil
}

// notifEvent is the JSON envelope sent to the notification server.
type notifEvent struct {
	Type    string           `json:"type"`
	Content *repoUpdateEvent `json:"content"`
}

// repoUpdateEvent is the payload of a "repo-update" notification.
type repoUpdateEvent struct {
	RepoID   string `json:"repo_id"`
	CommitID string `json:"commit_id"`
}

// notifRepoUpdate POSTs a "repo-update" event to the notification server,
// authenticated with a short-lived (300 s) JWT.
func notifRepoUpdate(repoID string, commitID string) error {
	content := new(repoUpdateEvent)
	content.RepoID = repoID
	content.CommitID =
commitID
	event := new(notifEvent)
	event.Type = "repo-update"
	event.Content = content
	msg, err := json.Marshal(event)
	if err != nil {
		log.Errorf("failed to encode repo update event: %v", err)
		return err
	}

	url := fmt.Sprintf("%s/events", option.NotificationURL)
	exp := time.Now().Add(time.Second * 300).Unix()
	token, err := utils.GenNotifJWTToken(repoID, "", exp)
	if err != nil {
		log.Errorf("failed to generate jwt token: %v", err)
		return err
	}
	header := map[string][]string{
		"Authorization": {"Token " + token},
	}
	_, _, err = utils.HttpCommon("POST", url, header, bytes.NewReader(msg))
	if err != nil {
		log.Warnf("failed to send repo update event: %v", err)
		return err
	}

	return nil
}

// doPostMultiFiles strips the leading slash from parentDir and delegates to
// the recursive tree rewrite, returning the new root dir id.
func doPostMultiFiles(repo *repomgr.Repo, rootID, parentDir string, dents []*fsmgr.SeafDirent, user string, replace bool, names *[]string) (string, error) {
	if parentDir[0] == '/' {
		parentDir = parentDir[1:]
	}
	id, err := postMultiFilesRecursive(repo, rootID, parentDir, user, dents, replace, names)
	if err != nil {
		err := fmt.Errorf("failed to post multi files: %v", err)
		return "", err
	}
	return id, nil
}

// postMultiFilesRecursive walks toPath one component at a time, rewriting
// each directory object along the way (content-addressed trees are
// immutable, so every ancestor gets a new id). At the target directory the
// new dents are inserted via addNewEntries. Returns the new id of the
// directory at this level, or an error when toPath doesn't exist.
func postMultiFilesRecursive(repo *repomgr.Repo, dirID, toPath, user string, dents []*fsmgr.SeafDirent, replace bool, names *[]string) (string, error) {
	olddir, err := fsmgr.GetSeafdir(repo.StoreID, dirID)
	if err != nil {
		err := fmt.Errorf("failed to get dir")
		return "", err
	}

	var ret string
	// Base case: this is the target directory.
	if toPath == "" {
		err := addNewEntries(repo, user, &olddir.Entries, dents, replace, names)
		if err != nil {
			err := fmt.Errorf("failed to add new entries: %v", err)
			return "", err
		}
		newdir, err := fsmgr.NewSeafdir(1, olddir.Entries)
		if err != nil {
			err := fmt.Errorf("failed to new seafdir: %v", err)
			return "", err
		}
		err = fsmgr.SaveSeafdir(repo.StoreID, newdir)
		if err != nil {
			err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID)
			return "", err
		}
		return newdir.DirID, nil
	}

	// Split off the first path component.
	var remain string
	firstName := toPath
	if slash := strings.Index(toPath, "/"); slash >= 0 {
		remain = toPath[slash+1:]
		firstName = toPath[:slash]
	}

	entries := olddir.Entries
	for i, dent := range entries {
		if dent.Name != firstName {
			continue
		}
		id, err := postMultiFilesRecursive(repo, dent.ID, remain, user, dents, replace, names)
		if err != nil {
			err := fmt.Errorf("failed to post dirent %s: %v", dent.Name, err)
			return "", err
		}
		ret = id
		if id != "" {
			// Point this entry at the rewritten child.
			entries[i].ID = id
			entries[i].Mtime = time.Now().Unix()
		}
		break
	}

	if ret != "" {
		newdir, err := fsmgr.NewSeafdir(1, entries)
		if err != nil {
			err := fmt.Errorf("failed to new seafdir: %v", err)
			return "", err
		}
		err = fsmgr.SaveSeafdir(repo.StoreID, newdir)
		if err != nil {
			err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID)
			return "", err
		}
		ret = newdir.DirID
	} else {
		// The ret will be an empty string when failed to find parent dir, an error should be returned in such case.
		err := fmt.Errorf("failed to find parent dir for %s", toPath)
		return "", err
	}

	return ret, nil
}

// addNewEntries merges newDents into *oldDents. In replace mode an existing
// same-named entry is removed first and the name kept; otherwise a unique
// "name (N)" variant is generated. Accepted names are appended to *names
// and the result is re-sorted.
func addNewEntries(repo *repomgr.Repo, user string, oldDents *[]*fsmgr.SeafDirent, newDents []*fsmgr.SeafDirent, replaceExisted bool, names *[]string) error {
	for _, dent := range newDents {
		var replace bool
		var uniqueName string
		if replaceExisted {
			for i, entry := range *oldDents {
				if entry.Name == dent.Name {
					replace = true
					*oldDents = append((*oldDents)[:i], (*oldDents)[i+1:]...)
					break
				}
			}
		}
		if replace {
			uniqueName = dent.Name
		} else {
			uniqueName = genUniqueName(dent.Name, *oldDents)
		}
		if uniqueName != "" {
			newDent := fsmgr.NewDirent(dent.ID, uniqueName, dent.Mode, dent.Mtime, user, dent.Size)
			*oldDents = append(*oldDents, newDent)
			*names = append(*names, uniqueName)
		} else {
			err := fmt.Errorf("failed to generate unique name for %s", dent.Name)
			return err
		}
	}
	sort.Sort(Dirents(*oldDents))
	return nil
}

// genUniqueName returns fileName, or the first free "name (N)[.ext]"
// variant, or "" when all duplicateNamesCount variants are taken.
func genUniqueName(fileName string, entries []*fsmgr.SeafDirent) string {
	var uniqueName string
	var name string
	i := 1
	dot := strings.LastIndex(fileName, ".")
	if dot < 0 {
		name = fileName
	} else {
		name = fileName[:dot]
	}
	uniqueName = fileName

	for nameExists(entries, uniqueName) && i <= duplicateNamesCount {
		if dot < 0 {
			uniqueName = fmt.Sprintf("%s (%d)", name, i)
		} else {
			uniqueName = fmt.Sprintf("%s (%d).%s", name, i, fileName[dot+1:])
		}
		i++
	}

	if i <= duplicateNamesCount {
		return uniqueName
	}

	return ""
}

// nameExists reports whether fileName appears among entries.
func nameExists(entries []*fsmgr.SeafDirent, fileName string) bool {
	for _, entry := range entries {
		if entry.Name == fileName {
			return true
		}
	}

	return false
}

// shouldIgnore reports true when any path component is "..", i.e. the name
// could escape its directory.
func shouldIgnore(fileName string) bool {
	parts := strings.Split(fileName, "/")
	for _, name := range parts {
		if name == ".."
{
			return true
		}
	}
	return false
}

// shouldIgnoreFile rejects names with ".." components, invalid UTF-8,
// length >= 256 bytes, or embedded slashes.
func shouldIgnoreFile(fileName string) bool {
	if shouldIgnore(fileName) {
		return true
	}
	if !utf8.ValidString(fileName) {
		log.Warnf("file name %s contains non-UTF8 characters, skip", fileName)
		return true
	}
	if len(fileName) >= 256 {
		return true
	}
	if strings.Contains(fileName, "/") {
		return true
	}
	return false
}

// indexBlocks hands a file (either an on-disk path or a multipart header,
// exactly one of which is set) to the indexing worker pool and blocks until
// the worker posts the resulting file id and size on recvChan.
func indexBlocks(ctx context.Context, repoID string, version int, filePath string, handler *multipart.FileHeader, cryptKey *seafileCrypt) (string, int64, error) {
	req := &indexFileRequest{
		ctx:      ctx,
		repoID:   repoID,
		version:  version,
		filePath: filePath,
		handler:  handler,
		cryptKey: cryptKey,
	}
	recvChan := make(chan *indexFileResult)

	indexFilePool.AddTask(recvChan, req)
	result := <-recvChan
	return result.fileID, result.size, result.err
}

// indexFileRequest is the work item submitted to indexFilePool.
type indexFileRequest struct {
	ctx      context.Context
	repoID   string
	version  int
	filePath string // on-disk path for chunked uploads; "" otherwise
	handler  *multipart.FileHeader
	cryptKey *seafileCrypt
}

// indexFileResult carries the worker's outcome back to indexBlocks.
type indexFileResult struct {
	fileID string
	size   int64
	err    error
}

// indexFileWorker is the pool entry point (args = [resChan, *request]).
// It splits the file into fixed-size blocks, fans them out to a chunking
// worker pool, collects the block ids by index, and writes the seafile
// object. Errors are reported via resChan; the function itself always
// returns nil so the pool keeps running.
func indexFileWorker(args ...any) error {
	resChan := args[0].(chan *indexFileResult)
	req := args[1].(*indexFileRequest)
	ctx := req.ctx
	repoID := req.repoID
	version := req.version
	filePath := req.filePath
	handler := req.handler
	cryptKey := req.cryptKey

	var size int64
	if handler != nil {
		size = handler.Size
	} else {
		f, err := os.Open(filePath)
		if err != nil {
			err := fmt.Errorf("failed to open file: %s: %v", filePath, err)
			resChan <- &indexFileResult{err: err}
			return nil
		}
		defer f.Close()
		fileInfo, err := f.Stat()
		if err != nil {
			err := fmt.Errorf("failed to stat file %s: %v", filePath, err)
			resChan <- &indexFileResult{err: err}
			return nil
		}
		size = fileInfo.Size()
	}

	// Empty files map to the well-known empty-object id without chunking.
	if size == 0 {
		resChan <- &indexFileResult{fileID: fsmgr.EmptySha1, size: 0}
		return nil
	}

	chunkJobs := make(chan chunkingData, 10)
	results := make(chan chunkingResult, 10)
	go createChunkPool(ctx, int(option.MaxIndexingThreads), chunkJobs, results)

	var blkSize int64
	var offset int64

	// Ceiling division: number of fixed-size blocks.
	jobNum := (uint64(size) + option.FixedBlockSize - 1) / option.FixedBlockSize
	blkIDs := make([]string, jobNum)

	left := size
	for {
		if uint64(left) >= option.FixedBlockSize {
			blkSize = int64(option.FixedBlockSize)
		} else {
			blkSize = left
		}
		if left > 0 {
			job := chunkingData{repoID, filePath, handler, offset, cryptKey}
			// Either enqueue the next job or consume a finished result —
			// both channels are bounded, so doing only one would deadlock.
			select {
			case chunkJobs <- job:
				left -= blkSize
				offset += blkSize
			case result := <-results:
				if result.err != nil {
					close(chunkJobs)
					// Drain remaining results in the background so
					// workers are not blocked sending to a dead channel.
					go RecoverWrapper(func() {
						for result := range results {
							_ = result
						}
					})
					resChan <- &indexFileResult{err: result.err}
					return nil
				}
				blkIDs[result.idx] = result.blkID
			}
		} else {
			// All jobs enqueued; collect the remaining results.
			close(chunkJobs)
			for result := range results {
				if result.err != nil {
					go RecoverWrapper(func() {
						for result := range results {
							_ = result
						}
					})
					resChan <- &indexFileResult{err: result.err}
					return nil
				}
				blkIDs[result.idx] = result.blkID
			}
			break
		}
	}

	fileID, err := writeSeafile(repoID, version, size, blkIDs)
	if err != nil {
		err := fmt.Errorf("failed to write seafile: %v", err)
		resChan <- &indexFileResult{err: err}
		return nil
	}

	resChan <- &indexFileResult{fileID: fileID, size: size}
	return nil
}

// writeSeafile creates and persists the seafile (file metadata) object for
// the given ordered block ids, returning its content-addressed id.
func writeSeafile(repoID string, version int, fileSize int64, blkIDs []string) (string, error) {
	seafile, err := fsmgr.NewSeafile(version, fileSize, blkIDs)
	if err != nil {
		err := fmt.Errorf("failed to new seafile: %v", err)
		return "", err
	}
	err = fsmgr.SaveSeafile(repoID, seafile)
	if err != nil {
		err := fmt.Errorf("failed to save seafile %s/%s", repoID, seafile.FileID)
		return "", err
	}

	return seafile.FileID, nil
}

// chunkingData describes one block to read, hash, and store.
type chunkingData struct {
	repoID   string
	filePath string
	handler  *multipart.FileHeader
	offset   int64
	cryptKey *seafileCrypt
}

// chunkingResult is one block's outcome; idx is the block index derived
// from the offset (-1 on cancellation).
type chunkingResult struct {
	idx   int64
	blkID string
	err   error
}

// createChunkPool starts n chunking workers and closes res once they all
// finish (i.e. after chunkJobs is closed and drained).
func createChunkPool(ctx context.Context, n int, chunkJobs chan chunkingData, res chan chunkingResult) {
	defer func() {
		if err := recover(); err != nil {
			log.Errorf("panic: %v\n%s", err, debug.Stack())
		}
	}()
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		go chunkingWorker(ctx, &wg,
chunkJobs, res)
	}
	wg.Wait()
	close(res)
}

// chunkingWorker consumes block jobs until chunkJobs is closed. Before each
// job it checks ctx; on cancellation it reports a context.Canceled result
// (idx -1) and exits. Each successful job yields the block id at the index
// derived from the job's byte offset.
func chunkingWorker(ctx context.Context, wg *sync.WaitGroup, chunkJobs chan chunkingData, res chan chunkingResult) {
	defer func() {
		if err := recover(); err != nil {
			log.Errorf("panic: %v\n%s", err, debug.Stack())
		}
	}()
	for job := range chunkJobs {
		select {
		case <-ctx.Done():
			err := context.Canceled
			result := chunkingResult{-1, "", err}
			res <- result
			wg.Done()
			return
		default:
		}
		job := job
		blkID, err := chunkFile(job)
		idx := job.offset / int64(option.FixedBlockSize)
		result := chunkingResult{idx, blkID, err}
		res <- result
	}
	wg.Done()
}

// chunkFile reads one fixed-size block at job.offset from either the
// multipart part or the on-disk file, writes it (optionally encrypted) to
// block storage, and returns the block id.
func chunkFile(job chunkingData) (string, error) {
	repoID := job.repoID
	offset := job.offset
	filePath := job.filePath
	handler := job.handler
	blkSize := option.FixedBlockSize
	cryptKey := job.cryptKey
	var file multipart.File
	if handler != nil {
		f, err := handler.Open()
		if err != nil {
			err := fmt.Errorf("failed to open file for read: %v", err)
			return "", err
		}
		defer f.Close()
		file = f
	} else {
		f, err := os.Open(filePath)
		if err != nil {
			err := fmt.Errorf("failed to open file for read: %v", err)
			return "", err
		}
		defer f.Close()
		file = f
	}
	_, err := file.Seek(offset, io.SeekStart)
	if err != nil {
		err := fmt.Errorf("failed to seek file: %v", err)
		return "", err
	}
	buf := make([]byte, blkSize)
	n, err := file.Read(buf)
	if err != nil {
		// Fixed: this message previously said "failed to seek file",
		// copy-pasted from the Seek branch above, which made read
		// failures indistinguishable from seek failures in logs.
		// NOTE(review): a single Read may legitimately return fewer than
		// blkSize bytes, and io.EOF at exactly offset==size would land
		// here — presumably offsets are always < size so a short/EOF-free
		// read is expected; confirm against indexFileWorker's job layout.
		err := fmt.Errorf("failed to read file: %v", err)
		return "", err
	}
	buf = buf[:n]
	blkID, err := writeChunk(repoID, buf, int64(n), cryptKey)
	if err != nil {
		err := fmt.Errorf("failed to write chunk: %v", err)
		return "", err
	}

	return blkID, nil
}

// writeChunk stores one block, content-addressed by the SHA-1 of its final
// (possibly encrypted) bytes. Existing blocks are deduplicated by id.
func writeChunk(repoID string, input []byte, blkSize int64, cryptKey *seafileCrypt) (string, error) {
	var blkID string
	if cryptKey != nil && blkSize > 0 {
		encoded, err := cryptKey.encrypt(input)
		if err != nil {
			err := fmt.Errorf("failed to encrypt block: %v", err)
			return "", err
		}
		checkSum := sha1.Sum(encoded)
		blkID = hex.EncodeToString(checkSum[:])
		if blockmgr.Exists(repoID, blkID) {
			return blkID, nil
		}
		reader := bytes.NewReader(encoded)
		err = blockmgr.Write(repoID, blkID, reader)
		if err != nil {
			err := fmt.Errorf("failed to write block: %v", err)
			return "", err
		}
	} else {
		checkSum := sha1.Sum(input)
		blkID = hex.EncodeToString(checkSum[:])
		if blockmgr.Exists(repoID, blkID) {
			return blkID, nil
		}
		reader := bytes.NewReader(input)
		err := blockmgr.Write(repoID, blkID, reader)
		if err != nil {
			err := fmt.Errorf("failed to write block: %v", err)
			return "", err
		}
	}

	return blkID, nil
}

// checkTmpFileList enforces MaxUploadSize against the total size of the
// upload: stat'ed temp files for chunked uploads, multipart header sizes
// otherwise.
func checkTmpFileList(fsm *recvData) *appError {
	var totalSize int64
	if fsm.rstart >= 0 {
		for _, tmpFile := range fsm.files {
			fileInfo, err := os.Stat(tmpFile)
			if err != nil {
				msg := "Internal error.\n"
				err := fmt.Errorf("[upload] Failed to stat temp file %s", tmpFile)
				return &appError{err, msg, http.StatusInternalServerError}
			}
			totalSize += fileInfo.Size()
		}
	} else {
		for _, handler := range fsm.fileHeaders {
			totalSize += handler.Size
		}
	}

	if option.MaxUploadSize > 0 && uint64(totalSize) > option.MaxUploadSize {
		msg := "File size is too large.\n"
		return &appError{nil, msg, seafHTTPResTooLarge}
	}

	return nil
}

// checkParentDir verifies that parentDir exists in the repo's current head
// commit tree.
func checkParentDir(repoID string, parentDir string) *appError {
	repo := repomgr.Get(repoID)
	if repo == nil {
		msg := "Failed to get repo.\n"
		err := fmt.Errorf("Failed to get repo %s", repoID)
		return &appError{err, msg, http.StatusInternalServerError}
	}
	commit, err := commitmgr.Load(repoID, repo.HeadCommitID)
	if err != nil {
		msg := "Failed to get head commit.\n"
		err := fmt.Errorf("Failed to get head commit for repo %s", repoID)
		return &appError{err, msg, http.StatusInternalServerError}
	}

	canonPath := getCanonPath(parentDir)
	_, err = fsmgr.GetSeafdirByPath(repo.StoreID, commit.RootID, canonPath)
	if err != nil {
		msg := "Parent dir doesn't exist.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}

	return nil
}

// isParentMatched compares two directory paths after rooting and cleaning
// both with filepath.Join.
func isParentMatched(uploadDir, parentDir string) bool {
	uploadCanon := filepath.Join("/", uploadDir)
	parentCanon := filepath.Join("/", parentDir)

	return uploadCanon == parentCanon
}

func
// parseContentRange parses a "bytes S-E/T" Content-Range header into
// fsm.rstart/rend/fsize. Returns false (leaving fsm untouched) on any
// malformed or inconsistent range (S > E, or E >= T).
parseContentRange(ranges string, fsm *recvData) bool {
	start := strings.Index(ranges, "bytes")
	end := strings.Index(ranges, "-")
	slash := strings.Index(ranges, "/")
	if start < 0 || end < 0 || slash < 0 {
		return false
	}
	startStr := strings.TrimLeft(ranges[start+len("bytes"):end], " ")
	firstByte, err := strconv.ParseInt(startStr, 10, 64)
	if err != nil {
		return false
	}
	lastByte, err := strconv.ParseInt(ranges[end+1:slash], 10, 64)
	if err != nil {
		return false
	}
	fileSize, err := strconv.ParseInt(ranges[slash+1:], 10, 64)
	if err != nil {
		return false
	}
	if firstByte > lastByte || lastByte >= fileSize {
		return false
	}

	fsm.rstart = firstByte
	fsm.rend = lastByte
	fsm.fsize = fileSize

	return true
}

// webaccessInfo is the decoded payload of a web access token.
type webaccessInfo struct {
	repoID string
	objID  string
	op     string
	user   string
}

// parseWebaccessInfo resolves token via the "seafile_web_query_access_token"
// RPC and extracts repo-id/obj-id/op/username from the returned map. A nil
// RPC result means the token is unknown (403).
func parseWebaccessInfo(token string) (*webaccessInfo, *appError) {
	webaccess, err := rpcclient.Call("seafile_web_query_access_token", token)
	if err != nil {
		err := fmt.Errorf("failed to get web access token: %v", err)
		return nil, &appError{err, "", http.StatusInternalServerError}
	}
	if webaccess == nil {
		msg := "Access token not found"
		// err is necessarily nil on this path (checked above).
		return nil, &appError{err, msg, http.StatusForbidden}
	}

	webaccessMap, ok := webaccess.(map[string]interface{})
	if !ok {
		return nil, &appError{nil, "", http.StatusInternalServerError}
	}
	accessInfo := new(webaccessInfo)
	repoID, ok := webaccessMap["repo-id"].(string)
	if !ok {
		return nil, &appError{nil, "", http.StatusInternalServerError}
	}
	accessInfo.repoID = repoID
	id, ok := webaccessMap["obj-id"].(string)
	if !ok {
		return nil, &appError{nil, "", http.StatusInternalServerError}
	}
	accessInfo.objID = id
	op, ok := webaccessMap["op"].(string)
	if !ok {
		return nil, &appError{nil, "", http.StatusInternalServerError}
	}
	accessInfo.op = op
	user, ok := webaccessMap["username"].(string)
	if !ok {
		return nil, &appError{nil, "", http.StatusInternalServerError}
	}
	accessInfo.user = user

	return accessInfo, nil
}

// updateDir replaces the directory at dirPath with newDirID and creates a
// commit on top of headID (or the current head when headID is ""),
// returning the new commit id. For dirPath "/" the new dir id becomes the
// commit root directly.
func updateDir(repoID, dirPath, newDirID, user, headID string) (string, error) {
	repo := repomgr.Get(repoID)
	if repo == nil {
		err := fmt.Errorf("failed to get repo %.10s", repoID)
		return "", err
	}

	var base string
	if headID == "" {
		base = repo.HeadCommitID
	} else {
		base = headID
	}

	headCommit, err := commitmgr.Load(repo.ID, base)
	if err != nil {
		err := fmt.Errorf("failed to get head commit for repo %s", repo.ID)
		return "", err
	}

	if dirPath == "/" {
		commitDesc := genCommitDesc(repo, newDirID, headCommit.RootID)
		if commitDesc == "" {
			commitDesc = "Auto merge by system"
		}
		newCommitID, err := genNewCommit(repo, headCommit, newDirID, user, commitDesc, true, "", false)
		if err != nil {
			err := fmt.Errorf("failed to generate new commit: %v", err)
			return "", err
		}
		return newCommitID, nil
	}

	parent := filepath.Dir(dirPath)
	canonPath := getCanonPath(parent)
	dirName := filepath.Base(dirPath)

	dir, err := fsmgr.GetSeafdirByPath(repo.StoreID, headCommit.RootID, canonPath)
	if err != nil {
		err := fmt.Errorf("dir %s doesn't exist in repo %s", canonPath, repo.StoreID)
		return "", err
	}
	var exists bool
	for _, de := range dir.Entries {
		if de.Name == dirName {
			exists = true
		}
	}
	if !exists {
		err := fmt.Errorf("directory %s doesn't exist in repo %s", dirName, repo.StoreID)
		return "", err
	}

	newDent := fsmgr.NewDirent(newDirID, dirName, (syscall.S_IFDIR | 0644), time.Now().Unix(), "", 0)

	rootID, err := doPutFile(repo, headCommit.RootID, canonPath, newDent)
	if err != nil || rootID == "" {
		err := fmt.Errorf("failed to put file")
		return "", err
	}

	commitDesc := genCommitDesc(repo, rootID, headCommit.RootID)
	if commitDesc == "" {
		commitDesc = "Auto merge by system"
	}

	newCommitID, err := genNewCommit(repo, headCommit, rootID, user, commitDesc, true, "", false)
	if err != nil {
		err := fmt.Errorf("failed to generate new commit: %v", err)
		return "", err
	}
	go updateSizePool.AddTask(repoID)

	return newCommitID, nil
}

// genCommitDesc describes the change between parentRoot and root via a
// commit diff; empty string on failure.
func genCommitDesc(repo *repomgr.Repo, root, parentRoot string) string {
	var results []*diff.DiffEntry
	err := diff.DiffCommitRoots(repo.StoreID, parentRoot, root, &results, true)
	if err != nil {
		return ""
	}

	desc := diff.DiffResultsToDesc(results)

	return desc
}

// doPutFile strips the leading slash from parentDir and delegates to
// putFileRecursive.
func doPutFile(repo *repomgr.Repo, rootID, parentDir string, dent *fsmgr.SeafDirent) (string, error) {
	if strings.Index(parentDir, "/") == 0 {
		parentDir = parentDir[1:]
	}

	return putFileRecursive(repo, rootID, parentDir, dent)
}

// putFileRecursive walks toPath, replacing the same-named entry in the
// target directory with newDent and rewriting every ancestor directory
// object along the way. Returns the new id of the directory at this level.
func putFileRecursive(repo *repomgr.Repo, dirID, toPath string, newDent *fsmgr.SeafDirent) (string, error) {
	olddir, err := fsmgr.GetSeafdir(repo.StoreID, dirID)
	if err != nil {
		err := fmt.Errorf("failed to get dir")
		return "", err
	}
	entries := olddir.Entries

	var ret string
	// Base case: replace the entry in this directory.
	if toPath == "" {
		var newEntries []*fsmgr.SeafDirent
		for _, dent := range entries {
			if dent.Name == newDent.Name {
				newEntries = append(newEntries, newDent)
			} else {
				newEntries = append(newEntries, dent)
			}
		}

		newdir, err := fsmgr.NewSeafdir(1, newEntries)
		if err != nil {
			err := fmt.Errorf("failed to new seafdir: %v", err)
			return "", err
		}
		err = fsmgr.SaveSeafdir(repo.StoreID, newdir)
		if err != nil {
			err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID)
			return "", err
		}
		return newdir.DirID, nil
	}

	var remain string
	firstName := toPath
	if slash := strings.Index(toPath, "/"); slash >= 0 {
		remain = toPath[slash+1:]
		firstName = toPath[:slash]
	}

	for _, dent := range entries {
		if dent.Name != firstName {
			continue
		}
		id, err := putFileRecursive(repo, dent.ID, remain, newDent)
		if err != nil {
			err := fmt.Errorf("failed to put dirent %s: %v", dent.Name, err)
			return "", err
		}
		if id != "" {
			// entries holds pointers, so this mutates the shared dirent.
			dent.ID = id
			dent.Mtime = time.Now().Unix()
		}
		ret = id
		break
	}

	if ret != "" {
		newdir, err := fsmgr.NewSeafdir(1, entries)
		if err != nil {
			err := fmt.Errorf("failed to new seafdir: %v", err)
			return "", err
		}
		err = fsmgr.SaveSeafdir(repo.StoreID, newdir)
		if err != nil {
			err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID)
			return "", err
		}
		ret = newdir.DirID
	} else {
		err := fmt.Errorf("failed to find parent dir for %s", toPath)
		return "", err
	}

	return ret, nil
}

func
updateAPICB(rsp http.ResponseWriter, r *http.Request) *appError {
	// REST flavour of the update API: the reply body is the bare file ID.
	return updateCommonCB(rsp, r, false)
}

// updateAjaxCB serves the Ajax flavour of the update API; the reply body
// is JSON metadata describing the updated file.
func updateAjaxCB(rsp http.ResponseWriter, r *http.Request) *appError {
	return updateCommonCB(rsp, r, true)
}

// updateCommonCB holds the flow previously duplicated verbatim between
// updateAPICB and updateAjaxCB: answer CORS preflight, parse the upload
// headers, then delegate to doUpdate with the right reply format.
func updateCommonCB(rsp http.ResponseWriter, r *http.Request, isAjax bool) *appError {
	if r.Method == "OPTIONS" {
		setAccessControl(rsp)
		rsp.WriteHeader(http.StatusOK)
		return nil
	}
	fsm, err := parseUploadHeaders(r)
	if err != nil {
		formatJSONError(rsp, err)
		return err
	}
	if err := doUpdate(rsp, r, fsm, isAjax); err != nil {
		formatJSONError(rsp, err)
		return err
	}
	return nil
}

// doUpdate overwrites an existing file (form value "target_file") with the
// uploaded content. Supports both one-shot multipart uploads and resumable
// ranged uploads (fsm.rstart >= 0), checks quota, and finally commits the
// change through putFile. Returns nil on success or an appError describing
// the HTTP failure.
func doUpdate(rsp http.ResponseWriter, r *http.Request, fsm *recvData, isAjax bool) *appError {
	setAccessControl(rsp)
	if err := r.ParseMultipartForm(1 << 20); err != nil {
		return &appError{nil, "", http.StatusBadRequest}
	}
	defer r.MultipartForm.RemoveAll()

	repoID := fsm.repoID
	user := fsm.user
	targetFile := normalizeUTF8Path(r.FormValue("target_file"))
	if targetFile == "" {
		msg := "No target_file given.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	// An unparsable last_modify is silently ignored; mtime then defaults
	// to "now" further down the chain.
	lastModifyStr := normalizeUTF8Path(r.FormValue("last_modify"))
	var lastModify int64
	if lastModifyStr != "" {
		t, err := time.Parse(time.RFC3339, lastModifyStr)
		if err == nil {
			lastModify = t.Unix()
		}
	}
	parentDir := filepath.Dir(targetFile)
	fileName := filepath.Base(targetFile)
	defer clearTmpFile(fsm, parentDir)
	if fsm.rstart >= 0 {
		// Resumable (Content-Range) upload: append this chunk to the tmp
		// file and only proceed to commit once the last chunk arrived.
		if parentDir[0] != '/' {
			msg := "Invalid parent dir"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		formFiles := r.MultipartForm.File
		files, ok := formFiles["file"]
		if !ok {
			msg := "No file in multipart form.\n"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		if len(files) > 1 {
			msg := "More files in one request"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		err := writeBlockDataToTmpFile(r, fsm, formFiles, repoID, parentDir)
		if err != nil {
			msg := "Internal error.\n"
			err := fmt.Errorf("failed to write block data to tmp file: %v", err)
			return &appError{err, msg, http.StatusInternalServerError}
		}
		if fsm.rend != fsm.fsize-1 {
			// Not the final chunk yet: acknowledge and wait for more.
			rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
			success := "{\"success\": true}"
			rsp.Write([]byte(success))
			return nil
		}
	} else {
		formFiles := r.MultipartForm.File
		fileHeaders, ok := formFiles["file"]
		if !ok {
			msg := "No file in multipart form.\n"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		if len(fileHeaders) > 1 {
			msg := "More files in one request"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		for _, handler := range fileHeaders {
			fileName := filepath.Base(handler.Filename)
			fsm.fileNames = append(fsm.fileNames, fileName)
			fsm.fileHeaders = append(fsm.fileHeaders, handler)
		}
	}
	if fsm.fileNames == nil {
		msg := "No file.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	if err := checkParentDir(repoID, parentDir); err != nil {
		return err
	}
	if err := checkTmpFileList(fsm); err != nil {
		return err
	}
	// Determine the size used for the quota check: the declared full file
	// size for ranged uploads, the request Content-Length otherwise.
	var contentLen int64
	if fsm.fsize > 0 {
		contentLen = fsm.fsize
	} else {
		lenstr := r.Header.Get("Content-Length")
		if lenstr == "" {
			contentLen = -1
		} else {
			tmpLen, err := strconv.ParseInt(lenstr, 10, 64)
			if err != nil {
				msg := "Internal error.\n"
				err := fmt.Errorf("failed to parse content len: %v", err)
				return &appError{err, msg, http.StatusInternalServerError}
			}
			contentLen = tmpLen
		}
	}
	ret, err := checkQuota(repoID, contentLen)
	if err != nil {
		msg := "Internal error.\n"
		err := fmt.Errorf("failed to check quota: %v", err)
		return &appError{err, msg, http.StatusInternalServerError}
	}
	if ret == 1 {
		msg := "Out of quota.\n"
		return &appError{nil, msg, seafHTTPResNoQuota}
	}
	// Optional client-supplied head commit to base the update on.
	headIDs, ok := r.Form["head"]
	var headID string
	if ok {
		headID = headIDs[0]
	}
	if err := putFile(rsp, r, repoID, parentDir, user, fileName, fsm, headID, lastModify, isAjax); err != nil {
		return err
	}
	oper := "web-file-upload"
	// NOTE(review): contentLen may be -1 here (no Content-Length), which
	// wraps to a huge uint64 in the statistics message — preserved from the
	// original; confirm downstream handling.
	sendStatisticMsg(repoID, user, oper, uint64(contentLen))
	return nil
}

func putFile(rsp
http.ResponseWriter, r *http.Request, repoID, parentDir, user, fileName string,
	fsm *recvData, headID string, lastModify int64, isAjax bool) *appError {
	// putFile indexes the uploaded content and commits it over an existing
	// file. Returns nil on success or an appError for the HTTP layer.
	files := fsm.files
	repo := repomgr.Get(repoID)
	if repo == nil {
		msg := "Failed to get repo.\n"
		err := fmt.Errorf("Failed to get repo %s", repoID)
		return &appError{err, msg, http.StatusInternalServerError}
	}
	// Base the update on the client-supplied head when given, otherwise on
	// the current repo head.
	var base string
	if headID != "" {
		base = headID
	} else {
		base = repo.HeadCommitID
	}
	headCommit, err := commitmgr.Load(repo.ID, base)
	if err != nil {
		msg := "Failed to get head commit.\n"
		err := fmt.Errorf("failed to get head commit for repo %s", repo.ID)
		return &appError{err, msg, http.StatusInternalServerError}
	}
	canonPath := getCanonPath(parentDir)
	if shouldIgnoreFile(fileName) {
		msg := fmt.Sprintf("invalid fileName: %s.\n", fileName)
		return &appError{nil, msg, http.StatusBadRequest}
	}
	if strings.Contains(parentDir, "//") {
		msg := "parent_dir contains // sequence.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	// Updating requires the target to already exist (otherwise it would be
	// an upload, not an update).
	exist, _ := checkFileExists(repo.StoreID, headCommit.RootID, canonPath, fileName)
	if !exist {
		msg := "File does not exist.\n"
		return &appError{nil, msg, seafHTTPResNotExists}
	}
	var cryptKey *seafileCrypt
	if repo.IsEncrypted {
		key, err := parseCryptKey(rsp, repoID, user, repo.EncVersion)
		if err != nil {
			return err
		}
		cryptKey = key
	}
	// Record the GC id before indexing so a GC that runs concurrently with
	// this update is detected when the commit is created.
	gcID, err := repomgr.GetCurrentGCID(repo.StoreID)
	if err != nil {
		err := fmt.Errorf("failed to get current gc id: %v", err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	// Index the uploaded content: from the assembled tmp file for resumable
	// (ranged) uploads, directly from the multipart part otherwise.
	var fileID string
	var size int64
	var indexErr error
	if fsm.rstart >= 0 {
		fileID, size, indexErr = indexBlocks(r.Context(), repo.StoreID, repo.Version, files[0], nil, cryptKey)
	} else {
		fileID, size, indexErr = indexBlocks(r.Context(), repo.StoreID, repo.Version, "", fsm.fileHeaders[0], cryptKey)
	}
	if indexErr != nil {
		if !errors.Is(indexErr, context.Canceled) {
			err := fmt.Errorf("failed to index blocks: %w", indexErr)
			return &appError{err, "", http.StatusInternalServerError}
		}
		return &appError{nil, "", http.StatusInternalServerError}
	}
	// If the content is unchanged, skip the commit and just reply.
	fullPath := filepath.Join(parentDir, fileName)
	oldFileID, _, _ := fsmgr.GetObjIDByPath(repo.StoreID, headCommit.RootID, fullPath)
	if fileID == oldFileID {
		// Fix: this early path previously omitted the JSON Content-Type
		// header on the Ajax reply; writeUpdateReply now always sets it.
		return writeUpdateReply(rsp, fileName, fileID, size, isAjax)
	}
	mtime := time.Now().Unix()
	if lastModify > 0 {
		mtime = lastModify
	}
	mode := (syscall.S_IFREG | 0644)
	newDent := fsmgr.NewDirent(fileID, fileName, uint32(mode), mtime, user, size)
	var names []string
	rootID, err := doPostMultiFiles(repo, headCommit.RootID, canonPath, []*fsmgr.SeafDirent{newDent}, user, true, &names)
	if err != nil {
		err := fmt.Errorf("failed to put file %s to %s in repo %s: %v", fileName, canonPath, repo.ID, err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	desc := fmt.Sprintf("Modified \"%s\"", fileName)
	_, err = genNewCommit(repo, headCommit, rootID, user, desc, true, gcID, true)
	if err != nil {
		if errors.Is(err, ErrGCConflict) {
			return &appError{nil, "GC Conflict.\n", http.StatusConflict}
		}
		err := fmt.Errorf("failed to generate new commit: %v", err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	if appErr := writeUpdateReply(rsp, fileName, fileID, size, isAjax); appErr != nil {
		return appErr
	}
	// Propagate the change into virtual repos asynchronously.
	go mergeVirtualRepoPool.AddTask(repo.ID)
	return nil
}

// writeUpdateReply sends the update response (previously duplicated inline
// in putFile): JSON metadata for Ajax clients, the bare file ID otherwise.
func writeUpdateReply(rsp http.ResponseWriter, fileName, fileID string, size int64, isAjax bool) *appError {
	if !isAjax {
		rsp.Write([]byte(fileID))
		return nil
	}
	retJSON, err := formatUpdateJSONRet(fileName, fileID, size)
	if err != nil {
		err := fmt.Errorf("failed to format json data")
		return &appError{err, "", http.StatusInternalServerError}
	}
	rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
	rsp.Write(retJSON)
	return nil
}

func
formatUpdateJSONRet(fileName, fileID string, size int64) ([]byte, error) {
	// Reply payload for Ajax updates: a one-element array describing the
	// updated file.
	entry := map[string]interface{}{
		"name": fileName,
		"id":   fileID,
		"size": size,
	}
	jsonstr, err := json.Marshal([]map[string]interface{}{entry})
	if err != nil {
		err := fmt.Errorf("failed to convert array to json")
		return nil, err
	}
	return jsonstr, nil
}

// checkFileExists reports whether the directory at parentDir (within the
// tree rooted at rootID) contains an entry named fileName.
func checkFileExists(storeID, rootID, parentDir, fileName string) (bool, error) {
	dir, err := fsmgr.GetSeafdirByPath(storeID, rootID, parentDir)
	if err != nil {
		err := fmt.Errorf("parent_dir %s doesn't exist in repo %s: %v", parentDir, storeID, err)
		return false, err
	}
	for _, de := range dir.Entries {
		if de.Name == fileName {
			return true, nil
		}
	}
	return false, nil
}

// uploadBlksAPICB handles the "commit already-uploaded blocks as a file"
// API endpoint.
func uploadBlksAPICB(rsp http.ResponseWriter, r *http.Request) *appError {
	fsm, appErr := parseUploadHeaders(r)
	if appErr != nil {
		formatJSONError(rsp, appErr)
		return appErr
	}
	if appErr := doUploadBlks(rsp, r, fsm); appErr != nil {
		formatJSONError(rsp, appErr)
		return appErr
	}
	return nil
}

// doUploadBlks validates the form arguments and commits blocks that were
// uploaded earlier into a new file entry under parent_dir.
func doUploadBlks(rsp http.ResponseWriter, r *http.Request, fsm *recvData) *appError {
	if err := r.ParseMultipartForm(1 << 20); err != nil {
		return &appError{nil, "", http.StatusBadRequest}
	}
	defer r.MultipartForm.RemoveAll()

	repoID := fsm.repoID
	user := fsm.user

	// "replace" must be the literal 0 or 1 when present.
	var replaceExisted bool
	if replaceStr := r.FormValue("replace"); replaceStr != "" {
		replace, err := strconv.ParseInt(replaceStr, 10, 64)
		if err != nil || (replace != 0 && replace != 1) {
			msg := "Invalid argument replace.\n"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		replaceExisted = replace == 1
	}

	parentDir := normalizeUTF8Path(r.FormValue("parent_dir"))
	if parentDir == "" {
		msg := "No parent_dir given.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}

	// An unparsable last_modify is silently ignored, as in the other
	// upload handlers.
	var lastModify int64
	if lastModifyStr := normalizeUTF8Path(r.FormValue("last_modify")); lastModifyStr != "" {
		if t, err := time.Parse(time.RFC3339, lastModifyStr); err == nil {
			lastModify = t.Unix()
		}
	}

	fileName := normalizeUTF8Path(r.FormValue("file_name"))
	if fileName == "" {
		msg := "No file_name given.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}

	var fileSize int64 = -1
	if fileSizeStr := r.FormValue("file_size"); fileSizeStr != "" {
		size, err := strconv.ParseInt(fileSizeStr, 10, 64)
		if err != nil {
			msg := "Invalid argument file_size.\n"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		fileSize = size
	}
	if fileSize < 0 {
		msg := "Invalid file size.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}

	// Only the commit-only mode is supported by this endpoint.
	if vals, ok := r.Form["commitonly"]; !ok || len(vals) == 0 {
		msg := "Only commit supported.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}

	if err := checkParentDir(repoID, parentDir); err != nil {
		return err
	}

	blockIDsJSON := r.FormValue("blockids")
	if blockIDsJSON == "" {
		msg := "No blockids given.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}

	fileID, appErr := commitFileBlocks(repoID, parentDir, fileName, blockIDsJSON, user, fileSize, replaceExisted, lastModify)
	if appErr != nil {
		return appErr
	}

	if _, ok := r.Form["ret-json"]; ok {
		jsonstr, err := json.Marshal(map[string]interface{}{"id": fileID})
		if err != nil {
			err := fmt.Errorf("failed to convert array to json: %v", err)
			return &appError{err, "", http.StatusInternalServerError}
		}
		rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
		rsp.Write(jsonstr)
	} else {
		// Reply with the file ID as a JSON string literal.
		rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
		rsp.Write([]byte("\""))
		rsp.Write([]byte(fileID))
		rsp.Write([]byte("\""))
	}
	return nil
}

func commitFileBlocks(repoID, parentDir, fileName, blockIDsJSON, user string, fileSize int64, replace bool, lastModify int64) (string, *appError) {
	repo := repomgr.Get(repoID)
	if repo == nil {
		msg := "Failed to get repo.\n"
		err := fmt.Errorf("Failed to get repo %s", repoID)
		return "", &appError{err, msg, http.StatusInternalServerError}
	}
	headCommit, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
	if err != nil {
		msg :=
"Failed to get head commit.\n" err := fmt.Errorf("failed to get head commit for repo %s", repo.ID) return "", &appError{err, msg, http.StatusInternalServerError} } canonPath := getCanonPath(parentDir) if shouldIgnoreFile(fileName) { msg := fmt.Sprintf("invalid fileName: %s.\n", fileName) return "", &appError{nil, msg, http.StatusBadRequest} } if strings.Contains(parentDir, "//") { msg := "parent_dir contains // sequence.\n" return "", &appError{nil, msg, http.StatusBadRequest} } var blkIDs []string err = json.Unmarshal([]byte(blockIDsJSON), &blkIDs) if err != nil { err := fmt.Errorf("failed to decode data to json: %v", err) return "", &appError{err, "", http.StatusInternalServerError} } appErr := checkQuotaBeforeCommitBlocks(repo.StoreID, blkIDs) if appErr != nil { return "", appErr } gcID, err := repomgr.GetCurrentGCID(repo.StoreID) if err != nil { err := fmt.Errorf("failed to get current gc id: %v", err) return "", &appError{err, "", http.StatusInternalServerError} } fileID, appErr := indexExistedFileBlocks(repoID, repo.Version, blkIDs, fileSize) if appErr != nil { return "", appErr } mtime := time.Now().Unix() if lastModify > 0 { mtime = lastModify } mode := (syscall.S_IFREG | 0644) newDent := fsmgr.NewDirent(fileID, fileName, uint32(mode), mtime, user, fileSize) var names []string rootID, err := doPostMultiFiles(repo, headCommit.RootID, canonPath, []*fsmgr.SeafDirent{newDent}, user, replace, &names) if err != nil { err := fmt.Errorf("failed to post file %s to %s in repo %s: %v", fileName, canonPath, repo.ID, err) return "", &appError{err, "", http.StatusInternalServerError} } desc := fmt.Sprintf("Added \"%s\"", fileName) _, err = genNewCommit(repo, headCommit, rootID, user, desc, true, gcID, true) if err != nil { if errors.Is(err, ErrGCConflict) { return "", &appError{nil, "GC Conflict.\n", http.StatusConflict} } else { err := fmt.Errorf("failed to generate new commit: %v", err) return "", &appError{err, "", http.StatusInternalServerError} } } return fileID, 
nil } func checkQuotaBeforeCommitBlocks(storeID string, blockIDs []string) *appError { var totalSize int64 for _, blkID := range blockIDs { size, err := blockmgr.Stat(storeID, blkID) if err != nil { err := fmt.Errorf("failed to stat block %s in store %s: %v", blkID, storeID, err) return &appError{err, "", http.StatusInternalServerError} } totalSize += size } ret, err := checkQuota(storeID, totalSize) if err != nil { msg := "Internal error.\n" err := fmt.Errorf("failed to check quota: %v", err) return &appError{err, msg, http.StatusInternalServerError} } if ret == 1 { msg := "Out of quota.\n" return &appError{nil, msg, seafHTTPResNoQuota} } return nil } func indexExistedFileBlocks(repoID string, version int, blkIDs []string, fileSize int64) (string, *appError) { if len(blkIDs) == 0 { return fsmgr.EmptySha1, nil } for _, blkID := range blkIDs { if !blockmgr.Exists(repoID, blkID) { err := fmt.Errorf("failed to check block: %s", blkID) return "", &appError{err, "", seafHTTPResBlockMissing} } } fileID, err := writeSeafile(repoID, version, fileSize, blkIDs) if err != nil { err := fmt.Errorf("failed to write seafile: %v", err) return "", &appError{err, "", http.StatusInternalServerError} } return fileID, nil } func uploadRawBlksAPICB(rsp http.ResponseWriter, r *http.Request) *appError { fsm, err := parseUploadHeaders(r) if err != nil { formatJSONError(rsp, err) return err } if err := doUploadRawBlks(rsp, r, fsm); err != nil { formatJSONError(rsp, err) return err } return nil } func doUploadRawBlks(rsp http.ResponseWriter, r *http.Request, fsm *recvData) *appError { if err := r.ParseMultipartForm(1 << 20); err != nil { return &appError{nil, "", http.StatusBadRequest} } defer r.MultipartForm.RemoveAll() repoID := fsm.repoID user := fsm.user formFiles := r.MultipartForm.File fileHeaders, ok := formFiles["file"] if !ok { msg := "No file in multipart form.\n" return &appError{nil, msg, http.StatusBadRequest} } for _, handler := range fileHeaders { fileName := 
filepath.Base(handler.Filename) fsm.fileNames = append(fsm.fileNames, fileName) fsm.fileHeaders = append(fsm.fileHeaders, handler) } if fsm.fileNames == nil { msg := "No file.\n" return &appError{nil, msg, http.StatusBadRequest} } if err := checkTmpFileList(fsm); err != nil { return err } if err := postBlocks(repoID, user, fsm); err != nil { return err } var contentLen int64 lenstr := r.Header.Get("Content-Length") if lenstr != "" { conLen, err := strconv.ParseInt(lenstr, 10, 64) if err != nil { msg := "Internal error.\n" err := fmt.Errorf("failed to parse content len: %v", err) return &appError{err, msg, http.StatusInternalServerError} } contentLen = conLen } oper := "web-file-upload" sendStatisticMsg(repoID, user, oper, uint64(contentLen)) rsp.Header().Set("Content-Type", "application/json; charset=utf-8") rsp.Write([]byte("\"OK\"")) return nil } func postBlocks(repoID, user string, fsm *recvData) *appError { blockIDs := fsm.fileNames fileHeaders := fsm.fileHeaders repo := repomgr.Get(repoID) if repo == nil { msg := "Failed to get repo.\n" err := fmt.Errorf("Failed to get repo %s", repoID) return &appError{err, msg, http.StatusInternalServerError} } if err := indexRawBlocks(repo.StoreID, blockIDs, fileHeaders); err != nil { err := fmt.Errorf("failed to index file blocks") return &appError{err, "", http.StatusInternalServerError} } go updateSizePool.AddTask(repo.ID) return nil } func indexRawBlocks(repoID string, blockIDs []string, fileHeaders []*multipart.FileHeader) error { for i, handler := range fileHeaders { var buf bytes.Buffer f, err := handler.Open() if err != nil { err := fmt.Errorf("failed to open file for read: %v", err) return err } _, err = buf.ReadFrom(f) if err != nil { err := fmt.Errorf("failed to read block: %v", err) return err } checkSum := sha1.Sum(buf.Bytes()) blkID := hex.EncodeToString(checkSum[:]) if blkID != blockIDs[i] { err := fmt.Errorf("block id %s:%s doesn't match content", blkID, blockIDs[i]) return err } err = blockmgr.Write(repoID, 
blkID, &buf) if err != nil { err := fmt.Errorf("failed to write block: %s/%s: %v", repoID, blkID, err) return err } } return nil } /* func uploadLinkCB(rsp http.ResponseWriter, r *http.Request) *appError { if seahubPK == "" { err := fmt.Errorf("no seahub private key is configured") return &appError{err, "", http.StatusNotFound} } if r.Method == "OPTIONS" { setAccessControl(rsp) rsp.WriteHeader(http.StatusOK) return nil } fsm, err := parseUploadLinkHeaders(r) if err != nil { return err } if err := doUpload(rsp, r, fsm, false); err != nil { formatJSONError(rsp, err) return err } return nil } func parseUploadLinkHeaders(r *http.Request) (*recvData, *appError) { tokenLen := 36 parts := strings.Split(r.URL.Path[1:], "/") if len(parts) < 2 { msg := "Invalid URL" return nil, &appError{nil, msg, http.StatusBadRequest} } if len(parts[1]) < tokenLen { msg := "Invalid URL" return nil, &appError{nil, msg, http.StatusBadRequest} } token := parts[1][:tokenLen] info, appErr := queryShareLinkInfo(token, "upload") if appErr != nil { return nil, appErr } repoID := info.RepoID parentDir := normalizeUTF8Path(info.ParentDir) status, err := repomgr.GetRepoStatus(repoID) if err != nil { return nil, &appError{err, "", http.StatusInternalServerError} } if status != repomgr.RepoStatusNormal && status != -1 { msg := "Repo status not writable." 
return nil, &appError{nil, msg, http.StatusBadRequest} } user, _ := repomgr.GetRepoOwner(repoID) fsm := new(recvData) fsm.parentDir = parentDir fsm.tokenType = "upload-link" fsm.repoID = repoID fsm.user = user fsm.rstart = -1 fsm.rend = -1 fsm.fsize = -1 ranges := r.Header.Get("Content-Range") if ranges != "" { parseContentRange(ranges, fsm) } return fsm, nil } */ type ShareLinkInfo struct { RepoID string `json:"repo_id"` FilePath string `json:"file_path"` ParentDir string `json:"parent_dir"` ShareType string `json:"share_type"` } func queryShareLinkInfo(token, cookie, opType, ipAddr, userAgent string) (*ShareLinkInfo, *appError) { tokenString, err := utils.GenSeahubJWTToken() if err != nil { err := fmt.Errorf("failed to sign jwt token: %v", err) return nil, &appError{err, "", http.StatusInternalServerError} } url := fmt.Sprintf("%s?type=%s", option.SeahubURL+"/check-share-link-access/", opType) header := map[string][]string{ "Authorization": {"Token " + tokenString}, } if cookie != "" { header["Cookie"] = []string{cookie} } req := make(map[string]string) req["token"] = token if ipAddr != "" { req["ip_addr"] = ipAddr } if userAgent != "" { req["user_agent"] = userAgent } msg, err := json.Marshal(req) if err != nil { err := fmt.Errorf("failed to encode access token: %v", err) return nil, &appError{err, "", http.StatusInternalServerError} } status, body, err := utils.HttpCommon("POST", url, header, bytes.NewReader(msg)) if err != nil { if status != http.StatusInternalServerError { return nil, &appError{nil, string(body), status} } else { err := fmt.Errorf("failed to get share link info: %v", err) return nil, &appError{err, "", http.StatusInternalServerError} } } info := new(ShareLinkInfo) err = json.Unmarshal(body, &info) if err != nil { err := fmt.Errorf("failed to decode share link info: %v", err) return nil, &appError{err, "", http.StatusInternalServerError} } return info, nil } func accessLinkCB(rsp http.ResponseWriter, r *http.Request) *appError { if 
option.JWTPrivateKey == "" { err := fmt.Errorf("no seahub private key is configured") return &appError{err, "", http.StatusNotFound} } parts := strings.Split(r.URL.Path[1:], "/") if len(parts) < 2 { msg := "Invalid URL" return &appError{nil, msg, http.StatusBadRequest} } token := parts[1] cookie := r.Header.Get("Cookie") ipAddr := getClientIPAddr(r) userAgent := r.Header.Get("User-Agent") info, appErr := queryShareLinkInfo(token, cookie, "file", ipAddr, userAgent) if appErr != nil { return appErr } if info.FilePath == "" { msg := "Internal server error\n" err := fmt.Errorf("failed to get file_path by token %s", token) return &appError{err, msg, http.StatusInternalServerError} } if info.ShareType != "f" { msg := "Link type mismatch" return &appError{nil, msg, http.StatusBadRequest} } repoID := info.RepoID filePath := normalizeUTF8Path(info.FilePath) fileName := filepath.Base(filePath) op := r.URL.Query().Get("op") if op != "view" { op = "download-link" } ranges := r.Header["Range"] byteRanges := strings.Join(ranges, "") repo := repomgr.Get(repoID) if repo == nil { msg := "Bad repo id\n" return &appError{nil, msg, http.StatusBadRequest} } user, _ := repomgr.GetRepoOwner(repoID) fileID, _, err := fsmgr.GetObjIDByPath(repo.StoreID, repo.RootID, filePath) if err != nil { msg := "Invalid file_path\n" return &appError{nil, msg, http.StatusBadRequest} } // Check for file changes by comparing the ETag in the If-None-Match header with the file ID. Set no-cache to allow clients to validate file changes before using the cache. 
etag := r.Header.Get("If-None-Match") if etag == fileID { return &appError{nil, "", http.StatusNotModified} } rsp.Header().Set("ETag", fileID) rsp.Header().Set("Cache-Control", "public, no-cache") var cryptKey *seafileCrypt if repo.IsEncrypted { key, err := parseCryptKey(rsp, repoID, user, repo.EncVersion) if err != nil { return err } cryptKey = key } exists, _ := fsmgr.Exists(repo.StoreID, fileID) if !exists { msg := "Invalid file id" return &appError{nil, msg, http.StatusBadRequest} } if !repo.IsEncrypted && len(byteRanges) != 0 { if err := doFileRange(rsp, r, repo, fileID, fileName, op, byteRanges, user); err != nil { return err } } else if err := doFile(rsp, r, repo, fileID, fileName, op, cryptKey, user); err != nil { return err } return nil } /* func accessDirLinkCB(rsp http.ResponseWriter, r *http.Request) *appError { if seahubPK == "" { err := fmt.Errorf("no seahub private key is configured") return &appError{err, "", http.StatusNotFound} } parts := strings.Split(r.URL.Path[1:], "/") if len(parts) < 2 { msg := "Invalid URL" return &appError{nil, msg, http.StatusBadRequest} } token := parts[1] info, appErr := queryShareLinkInfo(token, "dir") if appErr != nil { return appErr } repoID := info.RepoID parentDir := normalizeUTF8Path(info.ParentDir) op := "download-link" repo := repomgr.Get(repoID) if repo == nil { msg := "Bad repo id\n" return &appError{nil, msg, http.StatusBadRequest} } user, _ := repomgr.GetRepoOwner(repoID) filePath := r.URL.Query().Get("p") if filePath == "" { err := r.ParseForm() if err != nil { msg := "Invalid form\n" return &appError{nil, msg, http.StatusBadRequest} } parentDir := r.FormValue("parent_dir") if parentDir == "" { msg := "Invalid parent_dir\n" return &appError{nil, msg, http.StatusBadRequest} } parentDir = normalizeUTF8Path(parentDir) parentDir = getCanonPath(parentDir) dirents := r.FormValue("dirents") if dirents == "" { msg := "Invalid dirents\n" return &appError{nil, msg, http.StatusBadRequest} } // opStr:=r.FormVale("op") 
list, err := jsonToDirentList(repo, parentDir, dirents) if err != nil { log.Warnf("failed to parse dirent list: %v", err) msg := "Invalid dirents\n" return &appError{nil, msg, http.StatusBadRequest} } if len(list) == 0 { msg := "Invalid dirents\n" return &appError{nil, msg, http.StatusBadRequest} } obj := make(map[string]interface{}) if len(list) == 1 { dent := list[0] op = "download-dir-link" obj["dir_name"] = dent.Name obj["obj_id"] = dent.ID } else { op = "download-multi-link" obj["parent_dir"] = parentDir var fileList []string for _, dent := range list { fileList = append(fileList, dent.Name) } obj["file_list"] = fileList } data, err := json.Marshal(obj) if err != nil { err := fmt.Errorf("failed to encode zip obj: %v", err) return &appError{err, "", http.StatusInternalServerError} } if err := downloadZipFile(rsp, r, string(data), repoID, user, op); err != nil { return err } return nil } // file path is not empty string if _, ok := r.Header["If-Modified-Since"]; ok { return &appError{nil, "", http.StatusNotModified} } filePath = normalizeUTF8Path(filePath) fullPath := filepath.Join(parentDir, filePath) fileName := filepath.Base(filePath) fileID, _, err := fsmgr.GetObjIDByPath(repo.StoreID, repo.RootID, fullPath) if err != nil { msg := "Invalid file_path\n" return &appError{nil, msg, http.StatusBadRequest} } rsp.Header().Set("ETag", fileID) now := time.Now() rsp.Header().Set("Last-Modified", now.Format("Mon, 2 Jan 2006 15:04:05 GMT")) rsp.Header().Set("Cache-Control", "max-age=3600") ranges := r.Header["Range"] byteRanges := strings.Join(ranges, "") var cryptKey *seafileCrypt if repo.IsEncrypted { key, err := parseCryptKey(rsp, repoID, user, repo.EncVersion) if err != nil { return err } cryptKey = key } exists, _ := fsmgr.Exists(repo.StoreID, fileID) if !exists { msg := "Invalid file id" return &appError{nil, msg, http.StatusBadRequest} } if !repo.IsEncrypted && len(byteRanges) != 0 { if err := doFileRange(rsp, r, repo, fileID, fileName, op, byteRanges, user); 
err != nil { return err } } else if err := doFile(rsp, r, repo, fileID, fileName, op, cryptKey, user); err != nil { return err } return nil } func jsonToDirentList(repo *repomgr.Repo, parentDir, dirents string) ([]*fsmgr.SeafDirent, error) { var list []string err := json.Unmarshal([]byte(dirents), &list) if err != nil { return nil, err } dir, err := fsmgr.GetSeafdirByPath(repo.StoreID, repo.RootID, parentDir) if err != nil { return nil, err } direntHash := make(map[string]*fsmgr.SeafDirent) for _, dent := range dir.Entries { direntHash[dent.Name] = dent } var direntList []*fsmgr.SeafDirent for _, path := range list { normPath := normalizeUTF8Path(path) if normPath == "" || normPath == "/" { return nil, fmt.Errorf("Invalid download file name: %s\n", normPath) } dent, ok := direntHash[normPath] if !ok { return nil, fmt.Errorf("failed to get dient for %s in dir %s in repo %s", normPath, parentDir, repo.StoreID) } direntList = append(direntList, dent) } return direntList, nil } */ func removeFileopExpireCache() { deleteBlockMaps := func(key interface{}, value interface{}) bool { if blkMap, ok := value.(*blockMap); ok { if blkMap.expireTime <= time.Now().Unix() { blockMapCacheTable.Delete(key) } } return true } blockMapCacheTable.Range(deleteBlockMaps) } ================================================ FILE: fileserver/fileserver.go ================================================ // Main package for Seafile file server. 
package main import ( "crypto/tls" "crypto/x509" "database/sql" "flag" "fmt" "io" "net/http" "os" "os/signal" "path/filepath" "runtime/debug" "strings" "syscall" "time" "github.com/go-sql-driver/mysql" "github.com/gorilla/mux" "github.com/haiwen/seafile-server/fileserver/blockmgr" "github.com/haiwen/seafile-server/fileserver/commitmgr" "github.com/haiwen/seafile-server/fileserver/fsmgr" "github.com/haiwen/seafile-server/fileserver/metrics" "github.com/haiwen/seafile-server/fileserver/option" "github.com/haiwen/seafile-server/fileserver/repomgr" "github.com/haiwen/seafile-server/fileserver/searpc" "github.com/haiwen/seafile-server/fileserver/share" "github.com/haiwen/seafile-server/fileserver/utils" log "github.com/sirupsen/logrus" "net/http/pprof" ) var dataDir, absDataDir string var centralDir string var logFile, absLogFile string var rpcPipePath string var pidFilePath string var logFp *os.File var seafileDB, ccnetDB *sql.DB var logToStdout bool func init() { flag.StringVar(¢ralDir, "F", "", "central config directory") flag.StringVar(&dataDir, "d", "", "seafile data directory") flag.StringVar(&logFile, "l", "", "log file path") flag.StringVar(&rpcPipePath, "p", "", "rpc pipe path") flag.StringVar(&pidFilePath, "P", "", "pid file path") env := os.Getenv("SEAFILE_LOG_TO_STDOUT") if env == "true" { logToStdout = true } log.SetFormatter(&LogFormatter{}) } const ( timestampFormat = "[2006-01-02 15:04:05] " ) type LogFormatter struct{} func (f *LogFormatter) Format(entry *log.Entry) ([]byte, error) { levelStr := entry.Level.String() if levelStr == "fatal" { levelStr = "ERROR" } else { levelStr = strings.ToUpper(levelStr) } level := fmt.Sprintf("[%s] ", levelStr) appName := "" if logToStdout { appName = "[fileserver] " } buf := make([]byte, 0, len(appName)+len(timestampFormat)+len(level)+len(entry.Message)+1) if logToStdout { buf = append(buf, appName...) } buf = entry.Time.AppendFormat(buf, timestampFormat) buf = append(buf, level...) buf = append(buf, entry.Message...) 
buf = append(buf, '\n') return buf, nil } func loadCcnetDB() { dbOpt, err := option.LoadDBOption(centralDir) if err != nil { log.Fatalf("Failed to load database: %v", err) } var dsn string timeout := "&readTimeout=60s" + "&writeTimeout=60s" if dbOpt.UseTLS && dbOpt.SkipVerify { dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=skip-verify%s", dbOpt.User, dbOpt.Password, dbOpt.Host, dbOpt.Port, dbOpt.CcnetDbName, timeout) } else if dbOpt.UseTLS && !dbOpt.SkipVerify { registerCA(dbOpt.CaPath) dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=custom%s", dbOpt.User, dbOpt.Password, dbOpt.Host, dbOpt.Port, dbOpt.CcnetDbName, timeout) } else { dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=%t%s", dbOpt.User, dbOpt.Password, dbOpt.Host, dbOpt.Port, dbOpt.CcnetDbName, dbOpt.UseTLS, timeout) } if dbOpt.Charset != "" { dsn = fmt.Sprintf("%s&charset=%s", dsn, dbOpt.Charset) } ccnetDB, err = sql.Open("mysql", dsn) if err != nil { log.Fatalf("Failed to open database: %v", err) } ccnetDB.SetConnMaxLifetime(5 * time.Minute) ccnetDB.SetMaxOpenConns(8) ccnetDB.SetMaxIdleConns(8) } // registerCA registers CA to verify server cert. 
func registerCA(capath string) { rootCertPool := x509.NewCertPool() pem, err := os.ReadFile(capath) if err != nil { log.Fatal(err) } if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { log.Fatal("Failed to append PEM.") } mysql.RegisterTLSConfig("custom", &tls.Config{ RootCAs: rootCertPool, }) } func loadSeafileDB() { dbOpt, err := option.LoadDBOption(centralDir) if err != nil { log.Fatalf("Failed to load database: %v", err) } var dsn string timeout := "&readTimeout=60s" + "&writeTimeout=60s" if dbOpt.UseTLS && dbOpt.SkipVerify { dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=skip-verify%s", dbOpt.User, dbOpt.Password, dbOpt.Host, dbOpt.Port, dbOpt.SeafileDbName, timeout) } else if dbOpt.UseTLS && !dbOpt.SkipVerify { registerCA(dbOpt.CaPath) dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=custom%s", dbOpt.User, dbOpt.Password, dbOpt.Host, dbOpt.Port, dbOpt.SeafileDbName, timeout) } else { dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=%t%s", dbOpt.User, dbOpt.Password, dbOpt.Host, dbOpt.Port, dbOpt.SeafileDbName, dbOpt.UseTLS, timeout) } if dbOpt.Charset != "" { dsn = fmt.Sprintf("%s&charset=%s", dsn, dbOpt.Charset) } seafileDB, err = sql.Open("mysql", dsn) if err != nil { log.Fatalf("Failed to open database: %v", err) } seafileDB.SetConnMaxLifetime(5 * time.Minute) seafileDB.SetMaxOpenConns(8) seafileDB.SetMaxIdleConns(8) } func writePidFile(pid_file_path string) error { file, err := os.OpenFile(pid_file_path, os.O_CREATE|os.O_WRONLY, 0664) if err != nil { return err } defer file.Close() pid := os.Getpid() str := fmt.Sprintf("%d", pid) _, err = file.Write([]byte(str)) if err != nil { return err } return nil } func removePidfile(pid_file_path string) error { err := os.Remove(pid_file_path) if err != nil { return err } return nil } func main() { flag.Parse() if centralDir == "" { log.Fatal("central config directory must be specified.") } if pidFilePath != "" { if writePidFile(pidFilePath) != nil { log.Fatal("write pid file failed.") } } _, err := os.Stat(centralDir) if 
os.IsNotExist(err) { log.Fatalf("central config directory %s doesn't exist: %v.", centralDir, err) } if dataDir == "" { log.Fatal("seafile data directory must be specified.") } _, err = os.Stat(dataDir) if os.IsNotExist(err) { log.Fatalf("seafile data directory %s doesn't exist: %v.", dataDir, err) } absDataDir, err = filepath.Abs(dataDir) if err != nil { log.Fatalf("Failed to convert seafile data dir to absolute path: %v.", err) } if logToStdout { // Use default output (StdOut) } else if logFile == "" { absLogFile = filepath.Join(absDataDir, "fileserver.log") fp, err := os.OpenFile(absLogFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) if err != nil { log.Fatalf("Failed to open or create log file: %v", err) } logFp = fp log.SetOutput(fp) } else if logFile != "-" { absLogFile, err = filepath.Abs(logFile) if err != nil { log.Fatalf("Failed to convert log file path to absolute path: %v", err) } fp, err := os.OpenFile(absLogFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) if err != nil { log.Fatalf("Failed to open or create log file: %v", err) } logFp = fp log.SetOutput(fp) } if absLogFile != "" && !logToStdout { utils.Dup(int(logFp.Fd()), int(os.Stderr.Fd())) } // When logFile is "-", use default output (StdOut) if err := option.LoadSeahubConfig(); err != nil { log.Fatalf("Failed to read seahub config: %v", err) } option.LoadFileServerOptions(centralDir) loadCcnetDB() loadSeafileDB() level, err := log.ParseLevel(option.LogLevel) if err != nil { log.Info("use the default log level: info") log.SetLevel(log.InfoLevel) } else { log.SetLevel(level) } repomgr.Init(seafileDB) fsmgr.Init(centralDir, dataDir, option.FsCacheLimit) blockmgr.Init(centralDir, dataDir) commitmgr.Init(centralDir, dataDir) share.Init(ccnetDB, seafileDB, option.GroupTableName, option.CloudMode) rpcClientInit() fileopInit() syncAPIInit() sizeSchedulerInit() virtualRepoInit() initUpload() metrics.Init() router := newHTTPRouter() go handleSignals() go handleUser1Signal() log.Print("Seafile file server 
started.") server := new(http.Server) server.Addr = fmt.Sprintf("%s:%d", option.Host, option.Port) server.Handler = router err = server.ListenAndServe() if err != nil { log.Errorf("File server exiting: %v", err) } } func handleSignals() { signalChan := make(chan os.Signal, 1) signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM, os.Interrupt) <-signalChan metrics.Stop() removePidfile(pidFilePath) os.Exit(0) } func handleUser1Signal() { signalChan := make(chan os.Signal, 1) signal.Notify(signalChan, syscall.SIGUSR1) for { <-signalChan logRotate() } } func logRotate() { if logToStdout { return } // reopen fileserver log fp, err := os.OpenFile(absLogFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) if err != nil { log.Fatalf("Failed to reopen fileserver log: %v", err) } log.SetOutput(fp) if logFp != nil { logFp.Close() logFp = fp } utils.Dup(int(logFp.Fd()), int(os.Stderr.Fd())) } var rpcclient *searpc.Client func rpcClientInit() { var pipePath string if rpcPipePath != "" { pipePath = filepath.Join(rpcPipePath, "seafile.sock") } else { pipePath = filepath.Join(absDataDir, "seafile.sock") } rpcclient = searpc.Init(pipePath, "seafserv-threaded-rpcserver", 10) } func newHTTPRouter() *mux.Router { r := mux.NewRouter() r.HandleFunc("/protocol-version{slash:\\/?}", handleProtocolVersion) r.Handle("/files/{.*}/{.*}", appHandler(accessCB)) r.Handle("/blks/{.*}/{.*}", appHandler(accessBlksCB)) r.Handle("/zip/{.*}", appHandler(accessZipCB)) r.Handle("/upload-api/{.*}", appHandler(uploadAPICB)) r.Handle("/upload-aj/{.*}", appHandler(uploadAjaxCB)) r.Handle("/update-api/{.*}", appHandler(updateAPICB)) r.Handle("/update-aj/{.*}", appHandler(updateAjaxCB)) r.Handle("/upload-blks-api/{.*}", appHandler(uploadBlksAPICB)) r.Handle("/upload-raw-blks-api/{.*}", appHandler(uploadRawBlksAPICB)) // links api //r.Handle("/u/{.*}", appHandler(uploadLinkCB)) r.Handle("/f/{.*}{slash:\\/?}", appHandler(accessLinkCB)) //r.Handle("/d/{.*}", appHandler(accessDirLinkCB)) 
r.Handle("/repos/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/files/{filepath:.*}", appHandler(accessV2CB)) // file syncing api r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/permission-check{slash:\\/?}", appHandler(permissionCheckCB)) r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/commit/HEAD{slash:\\/?}", appHandler(headCommitOperCB)) r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/commit/{id:[\\da-z]{40}}", appHandler(commitOperCB)) r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/block/{id:[\\da-z]{40}}", appHandler(blockOperCB)) r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/fs-id-list{slash:\\/?}", appHandler(getFsObjIDCB)) r.Handle("/repo/head-commits-multi{slash:\\/?}", appHandler(headCommitsMultiCB)) r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/pack-fs{slash:\\/?}", appHandler(packFSCB)) r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/check-fs{slash:\\/?}", appHandler(checkFSCB)) r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/check-blocks{slash:\\/?}", appHandler(checkBlockCB)) r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/recv-fs{slash:\\/?}", appHandler(recvFSCB)) r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/quota-check{slash:\\/?}", appHandler(getCheckQuotaCB)) r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/jwt-token{slash:\\/?}", appHandler(getJWTTokenCB)) // seadrive api r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/block-map/{id:[\\da-z]{40}}", appHandler(getBlockMapCB)) r.Handle("/accessible-repos{slash:\\/?}", appHandler(getAccessibleRepoListCB)) // pprof 
r.Handle("/debug/pprof", &profileHandler{http.HandlerFunc(pprof.Index)}) r.Handle("/debug/pprof/cmdline", &profileHandler{http.HandlerFunc(pprof.Cmdline)}) r.Handle("/debug/pprof/profile", &profileHandler{http.HandlerFunc(pprof.Profile)}) r.Handle("/debug/pprof/symbol", &profileHandler{http.HandlerFunc(pprof.Symbol)}) r.Handle("/debug/pprof/heap", &profileHandler{pprof.Handler("heap")}) r.Handle("/debug/pprof/block", &profileHandler{pprof.Handler("block")}) r.Handle("/debug/pprof/goroutine", &profileHandler{pprof.Handler("goroutine")}) r.Handle("/debug/pprof/threadcreate", &profileHandler{pprof.Handler("threadcreate")}) r.Handle("/debug/pprof/trace", &traceHandler{}) if option.HasRedisOptions { r.Use(metrics.MetricMiddleware) } return r } func handleProtocolVersion(rsp http.ResponseWriter, r *http.Request) { io.WriteString(rsp, "{\"version\": 2}") } type appError struct { Error error Message string Code int } type appHandler func(http.ResponseWriter, *http.Request) *appError func (fn appHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if e := fn(w, r); e != nil { if e.Error != nil && e.Code == http.StatusInternalServerError { log.Errorf("path %s internal server error: %v\n", r.URL.Path, e.Error) } http.Error(w, e.Message, e.Code) } } func RecoverWrapper(f func()) { defer func() { if err := recover(); err != nil { log.Errorf("panic: %v\n%s", err, debug.Stack()) } }() f() } type profileHandler struct { pHandler http.Handler } func (p *profileHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { queries := r.URL.Query() password := queries.Get("password") if !option.EnableProfiling || password != option.ProfilePassword { http.Error(w, "", http.StatusUnauthorized) return } p.pHandler.ServeHTTP(w, r) } type traceHandler struct { } func (p *traceHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { queries := r.URL.Query() password := queries.Get("password") if !option.EnableProfiling || password != option.ProfilePassword { http.Error(w, "", 
http.StatusUnauthorized) return } pprof.Trace(w, r) } ================================================ FILE: fileserver/fsmgr/fsmgr.go ================================================ // Package fsmgr manages fs objects package fsmgr import ( "bytes" "compress/zlib" "crypto/sha1" "encoding/hex" "fmt" "io" "path/filepath" "strings" "sync" "syscall" "time" "unsafe" "github.com/haiwen/seafile-server/fileserver/objstore" "github.com/haiwen/seafile-server/fileserver/utils" jsoniter "github.com/json-iterator/go" "github.com/dgraph-io/ristretto" ) var json = jsoniter.ConfigCompatibleWithStandardLibrary // Seafile is a file object type Seafile struct { data []byte Version int `json:"version"` FileType int `json:"type"` FileID string `json:"-"` FileSize uint64 `json:"size"` BlkIDs []string `json:"block_ids"` } // In the JSON encoding generated by C language, there are spaces after the ',' and ':', and the order of the fields is sorted by the key. // So it is not compatible with the json library generated by go. 
// toJSON serializes the Seafile in the exact byte format produced by the C
// implementation: keys sorted alphabetically, a space after every ',' and ':'.
// The object id is a SHA1 of these bytes, so the format must not change.
func (file *Seafile) toJSON() ([]byte, error) {
	var buf bytes.Buffer
	buf.WriteByte('{')
	buf.WriteString("\"block_ids\": [")
	for i, blkID := range file.BlkIDs {
		data, err := json.Marshal(blkID)
		if err != nil {
			return nil, err
		}
		buf.Write(data)
		// ", " between elements, nothing after the last one.
		if i < len(file.BlkIDs)-1 {
			buf.WriteByte(',')
			buf.WriteByte(' ')
		}
	}
	buf.WriteByte(']')
	buf.WriteByte(',')
	buf.WriteByte(' ')
	data, err := json.Marshal(file.FileSize)
	if err != nil {
		return nil, err
	}
	writeField(&buf, "\"size\"", data)
	buf.WriteByte(',')
	buf.WriteByte(' ')
	// Type is always serialized as file, regardless of FileType's value.
	data, err = json.Marshal(SeafMetadataTypeFile)
	if err != nil {
		return nil, err
	}
	writeField(&buf, "\"type\"", data)
	buf.WriteByte(',')
	buf.WriteByte(' ')
	data, err = json.Marshal(file.Version)
	if err != nil {
		return nil, err
	}
	writeField(&buf, "\"version\"", data)
	buf.WriteByte('}')

	return buf.Bytes(), nil
}

// writeField appends `key: value` (with the C-style space after ':') to buf.
func writeField(buf *bytes.Buffer, key string, value []byte) {
	buf.WriteString(key)
	buf.WriteByte(':')
	buf.WriteByte(' ')
	buf.Write(value)
}

// SeafDirent is a dir entry object
type SeafDirent struct {
	Mode     uint32 `json:"mode"`
	ID       string `json:"id"`
	Name     string `json:"name"`
	Mtime    int64  `json:"mtime"`
	Modifier string `json:"modifier"`
	Size     int64  `json:"size"`
}

// toJSON serializes a dirent in the C-compatible format; "modifier" and
// "size" are emitted only for regular files, matching the C writer.
func (dent *SeafDirent) toJSON() ([]byte, error) {
	var buf bytes.Buffer
	buf.WriteByte('{')
	data, err := json.Marshal(dent.ID)
	if err != nil {
		return nil, err
	}
	writeField(&buf, "\"id\"", data)
	buf.WriteByte(',')
	buf.WriteByte(' ')
	data, err = json.Marshal(dent.Mode)
	if err != nil {
		return nil, err
	}
	writeField(&buf, "\"mode\"", data)
	buf.WriteByte(',')
	buf.WriteByte(' ')
	if IsRegular(dent.Mode) {
		// Modifier may contain '<'/'>'/'&'; jsonNoEscape avoids HTML escaping.
		data, err = jsonNoEscape(dent.Modifier)
		if err != nil {
			return nil, err
		}
		writeField(&buf, "\"modifier\"", data)
		buf.WriteByte(',')
		buf.WriteByte(' ')
	}
	data, err = json.Marshal(dent.Mtime)
	if err != nil {
		return nil, err
	}
	writeField(&buf, "\"mtime\"", data)
	buf.WriteByte(',')
	buf.WriteByte(' ')
	data, err = jsonNoEscape(dent.Name)
	if err != nil {
		return nil, err
	}
	writeField(&buf, "\"name\"", data)
	if IsRegular(dent.Mode) {
		buf.WriteByte(',')
		buf.WriteByte(' ')
		data, err = json.Marshal(dent.Size)
		if err != nil {
			return nil, err
		}
		writeField(&buf, "\"size\"", data)
	}
	buf.WriteByte('}')

	return buf.Bytes(), nil
}

// In golang json, the string is encoded using HTMLEscape, which replaces "<", ">", "&", U+2028, and U+2029 are escaped to "\u003c","\u003e", "\u0026", "\u2028", and "\u2029".
// So it is not compatible with the json library generated by c. This replacement can be disabled when using an Encoder, by calling SetEscapeHTML(false).
func jsonNoEscape(data interface{}) ([]byte, error) {
	var buf bytes.Buffer
	encoder := json.NewEncoder(&buf)
	encoder.SetEscapeHTML(false)
	if err := encoder.Encode(data); err != nil {
		return nil, err
	}
	bytes := buf.Bytes()
	// Encode will terminate each value with a newline.
	// This makes the output look a little nicer
	// when debugging, and some kind of space
	// is required if the encoded value was a number,
	// so that the reader knows there aren't more
	// digits coming.
	// The newline at the end needs to be removed for the above reasons.
	return bytes[:len(bytes)-1], nil
}

// SeafDir is a dir object
type SeafDir struct {
	data    []byte // cached C-compatible JSON encoding, set by NewSeafdir
	Version int    `json:"version"`
	DirType int    `json:"type"`
	DirID   string `json:"-"`
	Entries []*SeafDirent `json:"dirents"`
}

// toJSON serializes the SeafDir in the C-compatible format (keys sorted,
// space after ',' and ':'); the dir id is a SHA1 of these bytes.
func (dir *SeafDir) toJSON() ([]byte, error) {
	var buf bytes.Buffer
	buf.WriteByte('{')
	buf.WriteString("\"dirents\": [")
	for i, entry := range dir.Entries {
		data, err := entry.toJSON()
		if err != nil {
			return nil, err
		}
		buf.Write(data)
		if i < len(dir.Entries)-1 {
			buf.WriteByte(',')
			buf.WriteByte(' ')
		}
	}
	buf.WriteByte(']')
	buf.WriteByte(',')
	buf.WriteByte(' ')
	data, err := json.Marshal(SeafMetadataTypeDir)
	if err != nil {
		return nil, err
	}
	writeField(&buf, "\"type\"", data)
	buf.WriteByte(',')
	buf.WriteByte(' ')
	data, err = json.Marshal(dir.Version)
	if err != nil {
		return nil, err
	}
	writeField(&buf, "\"version\"", data)
	buf.WriteByte('}')

	return buf.Bytes(), nil
}

// FileCountInfo contains information of files
type FileCountInfo struct {
	FileCount int64
	Size      int64
	DirCount  int64
}

// Meta data type of dir or file
const (
	SeafMetadataTypeInvalid = iota
	SeafMetadataTypeFile
	SeafMetadataTypeLink
	SeafMetadataTypeDir
)

var store *objstore.ObjectStore

// Empty value of sha1
const (
	EmptySha1 = "0000000000000000000000000000000000000000"
)

// Since zlib library allocates a large amount of memory every time a new reader is created, when the number of calls is too large,
// the GC will be executed frequently, resulting in high CPU usage.
var zlibReaders []io.ReadCloser
var zlibLock sync.Mutex

// Add fs cache, on the one hand to avoid repeated creation and destruction of repeatedly accessed objects,
// on the other hand it will also slow down the speed at which objects are released.
var fsCache *ristretto.Cache

// Init initializes fs manager and creates underlying object store.
func Init(seafileConfPath string, seafileDataDir string, fsCacheLimit int64) { store = objstore.New(seafileConfPath, seafileDataDir, "fs") fsCache, _ = ristretto.NewCache(&ristretto.Config{ NumCounters: 1e7, // number of keys to track frequency of (10M). MaxCost: fsCacheLimit, // maximum cost of cache. BufferItems: 64, // number of keys per Get buffer. Cost: calCost, }) } func calCost(value interface{}) int64 { return sizeOf(value) } const ( sizeOfString = int64(unsafe.Sizeof(string(""))) sizeOfPointer = int64(unsafe.Sizeof(uintptr(0))) sizeOfSeafile = int64(unsafe.Sizeof(Seafile{})) sizeOfSeafDir = int64(unsafe.Sizeof(SeafDir{})) sizeOfSeafDirent = int64(unsafe.Sizeof(SeafDirent{})) ) func sizeOf(a interface{}) int64 { var size int64 switch x := a.(type) { case string: return sizeOfString + int64(len(x)) case []string: for _, s := range x { size += sizeOf(s) } return size case *Seafile: size = sizeOfPointer size += sizeOfSeafile size += int64(len(x.FileID)) size += sizeOf(x.BlkIDs) return size case *SeafDir: size = sizeOfPointer size += sizeOfSeafDir size += int64(len(x.DirID)) for _, dent := range x.Entries { size += sizeOf(dent) } return size case *SeafDirent: size = sizeOfPointer size += sizeOfSeafDirent size += int64(len(x.ID)) size += int64(len(x.Name)) size += int64(len(x.Modifier)) return size } return 0 } func initZlibReader() (io.ReadCloser, error) { var buf bytes.Buffer // Since the corresponding reader has not been obtained when zlib is initialized, // an io.Reader needs to be built to initialize zlib. w := zlib.NewWriter(&buf) w.Close() r, err := zlib.NewReader(&buf) if err != nil { return nil, err } return r, nil } // GetOneZlibReader gets a zlib reader from zlibReaders. 
func GetOneZlibReader() io.ReadCloser { zlibLock.Lock() defer zlibLock.Unlock() var reader io.ReadCloser if len(zlibReaders) == 0 { reader, err := initZlibReader() if err != nil { return nil } return reader } reader = zlibReaders[0] zlibReaders = zlibReaders[1:] return reader } func ReturnOneZlibReader(reader io.ReadCloser) { if reader == nil { return } zlibLock.Lock() defer zlibLock.Unlock() zlibReaders = append(zlibReaders, reader) } // NewDirent initializes a SeafDirent object func NewDirent(id string, name string, mode uint32, mtime int64, modifier string, size int64) *SeafDirent { dent := new(SeafDirent) dent.ID = id if id == "" { dent.ID = EmptySha1 } dent.Name = name dent.Mode = mode dent.Mtime = mtime if IsRegular(mode) { dent.Modifier = modifier dent.Size = size } return dent } // NewSeafdir initializes a SeafDir object func NewSeafdir(version int, entries []*SeafDirent) (*SeafDir, error) { dir := new(SeafDir) dir.Version = version dir.Entries = entries if len(entries) == 0 { dir.DirID = EmptySha1 return dir, nil } jsonstr, err := dir.toJSON() if err != nil { err := fmt.Errorf("failed to convert seafdir to json") return nil, err } dir.data = jsonstr checksum := sha1.Sum(jsonstr) dir.DirID = hex.EncodeToString(checksum[:]) return dir, nil } // NewSeafile initializes a Seafile object func NewSeafile(version int, fileSize int64, blkIDs []string) (*Seafile, error) { seafile := new(Seafile) seafile.Version = version seafile.FileSize = uint64(fileSize) seafile.BlkIDs = blkIDs if len(blkIDs) == 0 { seafile.FileID = EmptySha1 return seafile, nil } jsonstr, err := seafile.toJSON() if err != nil { err := fmt.Errorf("failed to convert seafile to json") return nil, err } seafile.data = jsonstr checkSum := sha1.Sum(jsonstr) seafile.FileID = hex.EncodeToString(checkSum[:]) return seafile, nil } func uncompress(p []byte, reader io.ReadCloser) ([]byte, error) { b := bytes.NewReader(p) var out bytes.Buffer if reader == nil { r, err := zlib.NewReader(b) if err != nil { 
return nil, err } _, err = io.Copy(&out, r) if err != nil { r.Close() return nil, err } r.Close() return out.Bytes(), nil } // resue the old zlib reader. resetter, _ := reader.(zlib.Resetter) err := resetter.Reset(b, nil) if err != nil { return nil, err } _, err = io.Copy(&out, reader) if err != nil { return nil, err } return out.Bytes(), nil } func compress(p []byte) ([]byte, error) { var out bytes.Buffer w := zlib.NewWriter(&out) _, err := w.Write(p) if err != nil { w.Close() return nil, err } w.Close() return out.Bytes(), nil } // FromData reads from p and converts JSON-encoded data to Seafile. func (seafile *Seafile) FromData(p []byte, reader io.ReadCloser) error { b, err := uncompress(p, reader) if err != nil { return err } err = json.Unmarshal(b, seafile) if err != nil { return err } if seafile.FileType != SeafMetadataTypeFile { return fmt.Errorf("object %s is not a file", seafile.FileID) } if seafile.Version < 1 { return fmt.Errorf("seafile object %s version should be > 0, version is %d", seafile.FileID, seafile.Version) } if seafile.BlkIDs == nil { return fmt.Errorf("no block id array in seafile object %s", seafile.FileID) } for _, blkID := range seafile.BlkIDs { if !utils.IsObjectIDValid(blkID) { return fmt.Errorf("block id %s is invalid", blkID) } } return nil } // ToData converts seafile to JSON-encoded data and writes to w. func (seafile *Seafile) ToData(w io.Writer) error { buf, err := compress(seafile.data) if err != nil { return err } _, err = w.Write(buf) if err != nil { return err } return nil } // ToData converts seafdir to JSON-encoded data and writes to w. func (seafdir *SeafDir) ToData(w io.Writer) error { buf, err := compress(seafdir.data) if err != nil { return err } _, err = w.Write(buf) if err != nil { return err } return nil } // FromData reads from p and converts JSON-encoded data to SeafDir. 
func (seafdir *SeafDir) FromData(p []byte, reader io.ReadCloser) error { b, err := uncompress(p, reader) if err != nil { return err } err = json.Unmarshal(b, seafdir) if err != nil { return err } if seafdir.DirType != SeafMetadataTypeDir { return fmt.Errorf("object %s is not a dir", seafdir.DirID) } if seafdir.Version < 1 { return fmt.Errorf("dir object %s version should be > 0, version is %d", seafdir.DirID, seafdir.Version) } if seafdir.Entries == nil { return fmt.Errorf("no dirents in dir object %s", seafdir.DirID) } for _, dent := range seafdir.Entries { if !utils.IsObjectIDValid(dent.ID) { return fmt.Errorf("dirent id %s is invalid", dent.ID) } } return nil } // ReadRaw reads data in binary format from storage backend. func ReadRaw(repoID string, objID string, w io.Writer) error { err := store.Read(repoID, objID, w) if err != nil { return err } return nil } // WriteRaw writes data in binary format to storage backend. func WriteRaw(repoID string, objID string, r io.Reader) error { err := store.Write(repoID, objID, r, false) if err != nil { return err } return nil } // GetSeafile gets seafile from storage backend. func GetSeafile(repoID string, fileID string) (*Seafile, error) { return getSeafile(repoID, fileID, nil) } // GetSeafileWithZlibReader gets seafile from storage backend with a zlib reader. 
func GetSeafileWithZlibReader(repoID string, fileID string, reader io.ReadCloser) (*Seafile, error) { return getSeafile(repoID, fileID, reader) } func getSeafile(repoID string, fileID string, reader io.ReadCloser) (*Seafile, error) { var buf bytes.Buffer seafile := new(Seafile) if fileID == EmptySha1 { seafile.FileID = EmptySha1 return seafile, nil } seafile.FileID = fileID err := ReadRaw(repoID, fileID, &buf) if err != nil { errors := fmt.Errorf("failed to read seafile object from storage : %v", err) return nil, errors } err = seafile.FromData(buf.Bytes(), reader) if err != nil { errors := fmt.Errorf("failed to parse seafile object %s/%s : %v", repoID, fileID, err) return nil, errors } if seafile.Version < 1 { errors := fmt.Errorf("seafile object %s/%s version should be > 0", repoID, fileID) return nil, errors } return seafile, nil } // SaveSeafile saves seafile to storage backend. func SaveSeafile(repoID string, seafile *Seafile) error { fileID := seafile.FileID if fileID == EmptySha1 { return nil } exist, _ := store.Exists(repoID, fileID) if exist { return nil } seafile.FileType = SeafMetadataTypeFile var buf bytes.Buffer err := seafile.ToData(&buf) if err != nil { errors := fmt.Errorf("failed to convert seafile object %s/%s to json", repoID, fileID) return errors } err = WriteRaw(repoID, fileID, &buf) if err != nil { errors := fmt.Errorf("failed to write seafile object to storage : %v", err) return errors } return nil } // GetSeafdir gets seafdir from storage backend. func GetSeafdir(repoID string, dirID string) (*SeafDir, error) { return getSeafdir(repoID, dirID, nil, false) } // GetSeafdir gets seafdir from storage backend with a zlib reader. 
func GetSeafdirWithZlibReader(repoID string, dirID string, reader io.ReadCloser) (*SeafDir, error) { return getSeafdir(repoID, dirID, reader, true) } func getSeafdir(repoID string, dirID string, reader io.ReadCloser, useCache bool) (*SeafDir, error) { var seafdir *SeafDir if useCache { seafdir = getSeafdirFromCache(repoID, dirID) if seafdir != nil { return seafdir, nil } } var buf bytes.Buffer seafdir = new(SeafDir) if dirID == EmptySha1 { seafdir.DirID = EmptySha1 return seafdir, nil } seafdir.DirID = dirID err := ReadRaw(repoID, dirID, &buf) if err != nil { errors := fmt.Errorf("failed to read seafdir object from storage : %v", err) return nil, errors } err = seafdir.FromData(buf.Bytes(), reader) if err != nil { errors := fmt.Errorf("failed to parse seafdir object %s/%s : %v", repoID, dirID, err) return nil, errors } if seafdir.Version < 1 { errors := fmt.Errorf("seadir object %s/%s version should be > 0", repoID, dirID) return nil, errors } if useCache { setSeafdirToCache(repoID, seafdir) } return seafdir, nil } func getSeafdirFromCache(repoID string, dirID string) *SeafDir { key := repoID + dirID v, ok := fsCache.Get(key) if !ok { return nil } seafdir, ok := v.(*SeafDir) if ok { return seafdir } return nil } func setSeafdirToCache(repoID string, seafdir *SeafDir) error { key := repoID + seafdir.DirID fsCache.SetWithTTL(key, seafdir, 0, time.Duration(1*time.Hour)) return nil } // SaveSeafdir saves seafdir to storage backend. 
func SaveSeafdir(repoID string, seafdir *SeafDir) error { dirID := seafdir.DirID if dirID == EmptySha1 { return nil } exist, _ := store.Exists(repoID, dirID) if exist { return nil } seafdir.DirType = SeafMetadataTypeDir var buf bytes.Buffer err := seafdir.ToData(&buf) if err != nil { errors := fmt.Errorf("failed to convert seafdir object %s/%s to json", repoID, dirID) return errors } err = WriteRaw(repoID, dirID, &buf) if err != nil { errors := fmt.Errorf("failed to write seafdir object to storage : %v", err) return errors } return nil } // Exists check if fs object is exists. func Exists(repoID string, objID string) (bool, error) { if objID == EmptySha1 { return true, nil } return store.Exists(repoID, objID) } func comp(c rune) bool { return c == '/' } // IsDir check if the mode is dir. func IsDir(m uint32) bool { return (m & syscall.S_IFMT) == syscall.S_IFDIR } // IsRegular Check if the mode is regular. func IsRegular(m uint32) bool { return (m & syscall.S_IFMT) == syscall.S_IFREG } // ErrPathNoExist is an error indicating that the file does not exist var ErrPathNoExist = fmt.Errorf("path does not exist") // GetSeafdirByPath gets the object of seafdir by path. func GetSeafdirByPath(repoID string, rootID string, path string) (*SeafDir, error) { dir, err := GetSeafdir(repoID, rootID) if err != nil { errors := fmt.Errorf("directory is missing") return nil, errors } path = filepath.Join("/", path) parts := strings.FieldsFunc(path, comp) var dirID string for _, name := range parts { entries := dir.Entries for _, v := range entries { if v.Name == name && IsDir(v.Mode) { dirID = v.ID break } } if dirID == `` { return nil, ErrPathNoExist } dir, err = GetSeafdir(repoID, dirID) if err != nil { errors := fmt.Errorf("directory is missing") return nil, errors } } return dir, nil } // GetSeafdirIDByPath gets the dirID of SeafDir by path. 
func GetSeafdirIDByPath(repoID, rootID, path string) (string, error) { dirID, mode, err := GetObjIDByPath(repoID, rootID, path) if err != nil { err := fmt.Errorf("failed to get dir id by path: %s: %w", path, err) return "", err } if dirID == "" || !IsDir(mode) { return "", nil } return dirID, nil } // GetObjIDByPath gets the obj id by path func GetObjIDByPath(repoID, rootID, path string) (string, uint32, error) { var name string var baseDir *SeafDir formatPath := filepath.Join(path) if len(formatPath) == 0 || formatPath == "/" { return rootID, syscall.S_IFDIR, nil } index := strings.Index(formatPath, "/") if index < 0 { dir, err := GetSeafdir(repoID, rootID) if err != nil { err := fmt.Errorf("failed to find root dir %s: %v", rootID, err) return "", 0, err } name = formatPath baseDir = dir } else { name = filepath.Base(formatPath) dirName := filepath.Dir(formatPath) dir, err := GetSeafdirByPath(repoID, rootID, dirName) if err != nil { if err == ErrPathNoExist { return "", syscall.S_IFDIR, ErrPathNoExist } err := fmt.Errorf("failed to find dir %s in repo %s: %v", dirName, repoID, err) return "", syscall.S_IFDIR, err } baseDir = dir } entries := baseDir.Entries for _, de := range entries { if de.Name == name { return de.ID, de.Mode, nil } } return "", 0, nil } // GetFileCountInfoByPath gets the count info of file by path. 
func GetFileCountInfoByPath(repoID, rootID, path string) (*FileCountInfo, error) { dirID, err := GetSeafdirIDByPath(repoID, rootID, path) if err != nil { err := fmt.Errorf("failed to get file count info for repo %s path %s: %v", repoID, path, err) return nil, err } info, err := getFileCountInfo(repoID, dirID) if err != nil { err := fmt.Errorf("failed to get file count in repo %s: %v", repoID, err) return nil, err } return info, nil } func getFileCountInfo(repoID, dirID string) (*FileCountInfo, error) { dir, err := GetSeafdir(repoID, dirID) if err != nil { err := fmt.Errorf("failed to get dir: %v", err) return nil, err } info := new(FileCountInfo) entries := dir.Entries for _, de := range entries { if IsDir(de.Mode) { tmpInfo, err := getFileCountInfo(repoID, de.ID) if err != nil { err := fmt.Errorf("failed to get file count: %v", err) return nil, err } info.DirCount = tmpInfo.DirCount + 1 info.FileCount += tmpInfo.FileCount info.Size += tmpInfo.Size } else { info.FileCount++ info.Size += de.Size } } return info, nil } func GetDirentByPath(repoID, rootID, rpath string) (*SeafDirent, error) { parentDir := filepath.Dir(rpath) fileName := filepath.Base(rpath) var dir *SeafDir var err error if parentDir == "." 
{
	// rpath has no parent component: the leaf lives directly
	// under the root directory object.
	dir, err = GetSeafdir(repoID, rootID)
	if err != nil {
		return nil, err
	}
} else {
	dir, err = GetSeafdirByPath(repoID, rootID, parentDir)
	if err != nil {
		return nil, err
	}
}
// Linear scan of the parent's entries for the leaf name.
for _, de := range dir.Entries {
	if de.Name == fileName {
		return de, nil
	}
}
return nil, ErrPathNoExist
}



================================================
FILE: fileserver/fsmgr/fsmgr_test.go
================================================
package fsmgr

import (
	"fmt"
	"os"
	"testing"
)

const (
	seafileConfPath = "/tmp/conf"
	seafileDataDir  = "/tmp/conf/seafile-data"
	repoID          = "b1f2ad61-9164-418a-a47f-ab805dbd5694"
	blkID           = "0401fc662e3bc87a41f299a907c056aaf8322a26"
	subDirID        = "0401fc662e3bc87a41f299a907c056aaf8322a27"
)

// IDs of the fixture objects created by createFile; read by the tests.
var dirID string
var fileID string

// createFile builds the test fixtures: a seafile holding two copies of
// blkID, and a seafdir holding two identical entries, then saves both.
func createFile() error {
	var blkIDs []string
	for i := 0; i < 2; i++ {
		blkshal := blkID
		blkIDs = append(blkIDs, blkshal)
	}
	seafile, err := NewSeafile(1, 100, blkIDs)
	if err != nil {
		return err
	}
	err = SaveSeafile(repoID, seafile)
	if err != nil {
		return err
	}
	fileID = seafile.FileID
	var entries []*SeafDirent
	for i := 0; i < 2; i++ {
		// 0x4000 == S_IFDIR, so the entries are directories.
		dirent := SeafDirent{ID: subDirID, Name: "/", Mode: 0x4000}
		entries = append(entries, &dirent)
	}
	seafdir, err := NewSeafdir(1, entries)
	if err != nil {
		err := fmt.Errorf("failed to new seafdir: %v", err)
		return err
	}
	err = SaveSeafdir(repoID, seafdir)
	if err != nil {
		return err
	}
	dirID = seafdir.DirID
	return nil
}

// delFile removes the on-disk fixture tree created under seafileConfPath.
func delFile() error {
	err := os.RemoveAll(seafileConfPath)
	if err != nil {
		return err
	}
	return nil
}

// TestMain sets up the fs manager and fixtures, runs the suite, and
// cleans up afterwards.
func TestMain(m *testing.M) {
	Init(seafileConfPath, seafileDataDir, 2<<30)
	err := createFile()
	if err != nil {
		fmt.Printf("Failed to create test file : %v.\n", err)
		os.Exit(1)
	}
	code := m.Run()
	err = delFile()
	if err != nil {
		fmt.Printf("Failed to remove test file : %v\n", err)
	}
	os.Exit(code)
}

func TestGetSeafile(t *testing.T) {
	exists, err := Exists(repoID, fileID)
	if !exists {
		t.Errorf("seafile is not exists : %v.\n", err)
	}
	seafile, err := GetSeafile(repoID, fileID)
	if err != nil || seafile == nil {
	t.Errorf("Failed to get seafile : %v.\n", err)
	t.FailNow()
}
// Every block of the fixture file must be the fixture block ID.
for _, v := range seafile.BlkIDs {
	if v != blkID {
		t.Errorf("Wrong file content.\n")
	}
}
}

func TestGetSeafdir(t *testing.T) {
	exists, err := Exists(repoID, dirID)
	if !exists {
		t.Errorf("seafile is not exists : %v.\n", err)
	}
	seafdir, err := GetSeafdir(repoID, dirID)
	if err != nil || seafdir == nil {
		t.Errorf("Failed to get seafdir : %v.\n", err)
		t.FailNow()
	}
	// Every entry of the fixture dir must point at the fixture sub-dir.
	for _, v := range seafdir.Entries {
		if v.ID != subDirID {
			t.Errorf("Wrong file content.\n")
		}
	}
}

func TestGetSeafdirByPath(t *testing.T) {
	// "/" must resolve to the root directory object itself.
	seafdir, err := GetSeafdirByPath(repoID, dirID, "/")
	if err != nil || seafdir == nil {
		t.Errorf("Failed to get seafdir : %v.\n", err)
		t.FailNow()
	}
	for _, v := range seafdir.Entries {
		if v.ID != subDirID {
			t.Errorf("Wrong file content.\n")
		}
	}
}


================================================
FILE: fileserver/go.mod
================================================
module github.com/haiwen/seafile-server/fileserver

go 1.22

require (
	github.com/dgraph-io/ristretto v0.2.0
	github.com/go-redis/redis/v8 v8.11.5
	github.com/go-sql-driver/mysql v1.5.0
	github.com/golang-jwt/jwt/v5 v5.2.2
	github.com/google/uuid v1.1.1
	github.com/gorilla/mux v1.7.4
	github.com/json-iterator/go v1.1.12
	github.com/sirupsen/logrus v1.9.3
	golang.org/x/text v0.3.8
	gopkg.in/ini.v1 v1.55.0
)

require (
	github.com/cespare/xxhash/v2 v2.1.2 // indirect
	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
	github.com/dustin/go-humanize v1.0.1 // indirect
	github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/smartystreets/goconvey v1.6.4 // indirect
	golang.org/x/sys v0.11.0 // indirect
)


================================================
FILE: fileserver/go.sum
================================================
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgraph-io/ristretto v0.2.0 h1:XAfl+7cmoUDWW/2Lx8TGZQjjxIQ2Ley9DSf52dru4WE= github.com/dgraph-io/ristretto v0.2.0/go.mod h1:8uBHCU/PBV4Ag0CJrP47b9Ofby5dqWNh4FicAdoqFNU= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.1.1 
h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text 
v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/ini.v1 v1.55.0 h1:E8yzL5unfpW3M6fz/eB7Cb5MQAYSZ7GKo4Qth+N2sgQ=
gopkg.in/ini.v1 v1.55.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=


================================================
FILE: fileserver/http_code.go
================================================
package main

// Seafile-specific HTTP status codes returned by the file server,
// outside the standard 4xx range.
const (
	seafHTTPResBadFileName = 440
	seafHTTPResExists      = 441
	// NOTE(review): seafHTTPResNotExists shares code 441 with
	// seafHTTPResExists — looks suspicious; confirm against the
	// sync protocol before changing.
	seafHTTPResNotExists     = 441
	seafHTTPResTooLarge      = 442
	seafHTTPResNoQuota       = 443
	seafHTTPResRepoDeleted   = 444
	seafHTTPResRepoCorrupted = 445
	seafHTTPResBlockMissing  = 446
)


================================================
FILE: fileserver/merge.go
================================================
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"path/filepath"
	"sort"
	"strings"
	"time"

	"github.com/haiwen/seafile-server/fileserver/commitmgr"
	"github.com/haiwen/seafile-server/fileserver/fsmgr"
	"github.com/haiwen/seafile-server/fileserver/option"
	"github.com/haiwen/seafile-server/fileserver/utils"
)

// mergeOptions carries the inputs and outputs of a three-way merge.
type mergeOptions struct {
	remoteRepoID string
	remoteHead   string
	// mergedRoot receives the ID of the merged root directory.
	mergedRoot string
	// conflict is set when any entry had to be renamed as a conflict copy.
	conflict bool
	// emailToNickname caches modifier-email -> nickname lookups.
	emailToNickname map[string]string
}

// mergeTrees performs a three-way merge of the directory trees named by
// roots (base, head, remote — in that order).
func mergeTrees(storeID string, roots []string, opt *mergeOptions) error {
	if len(roots) != 3 {
		err := fmt.Errorf("invalid argument")
		return err
	}

	opt.emailToNickname = make(map[string]string)

	var trees []*fsmgr.SeafDir
	for i := 0; i < 3; i++ {
		dir, err := fsmgr.GetSeafdir(storeID, roots[i])
		if err != nil {
			err := fmt.Errorf("failed to get dir: %v", err)
			return err
		}
		trees = append(trees, dir)
	}

	err := mergeTreesRecursive(storeID, trees, "", opt)
	if err != nil {
		err := fmt.Errorf("failed to merge trees: %v", err)
		return err
	}

	return nil
}

// mergeTreesRecursive merges one directory level of the three trees
// (base/head/remote) and recurses into subdirectories via
// mergeDirectories. The merged root ID is left in opt.mergedRoot.
func mergeTreesRecursive(storeID string, trees []*fsmgr.SeafDir, baseDir string, opt *mergeOptions) error {
	var ptrs [3][]*fsmgr.SeafDirent
	var mergedDents []*fsmgr.SeafDirent

	n := 3

	for i := 0; i < n; i++ {
		if trees[i] != nil {
			ptrs[i] = trees[i].Entries
		}
	}

	var done bool
	var offset = make([]int, n)
	for {
		dents := make([]*fsmgr.SeafDirent, n)
		var firstName string
		done = true
		// Pick the next name to merge across the three cursors: the
		// lexicographically largest pending name. NOTE(review): this
		// assumes each tree's Entries are sorted in descending name
		// order (cf. sort.Sort(Dirents(...)) below) — confirm against
		// the Dirents Less implementation.
		for i := 0; i < n; i++ {
			if len(ptrs[i]) > offset[i] {
				done = false
				dent := ptrs[i][offset[i]]
				if firstName == "" {
					firstName = dent.Name
				} else if dent.Name > firstName {
					firstName = dent.Name
				}
			}
		}
		if done {
			break
		}

		// Collect the entries matching firstName from each tree and
		// advance those cursors.
		var nFiles, nDirs int
		for i := 0; i < n; i++ {
			if len(ptrs[i]) > offset[i] {
				dent := ptrs[i][offset[i]]
				if firstName == dent.Name {
					if fsmgr.IsDir(dent.Mode) {
						nDirs++
					} else {
						nFiles++
					}
					dents[i] = dent
					offset[i]++
				}
			}
		}

		if nFiles > 0 {
			retDents, err := mergeEntries(storeID, dents, baseDir, opt)
			if err != nil {
				return err
			}
			mergedDents = append(mergedDents, retDents...)
		}

		if nDirs > 0 {
			retDents, err := mergeDirectories(storeID, dents, baseDir, opt)
			if err != nil {
				return err
			}
			mergedDents = append(mergedDents, retDents...)
		}
	}

	// Keep the merged entries in the canonical seafile dirent order.
	sort.Sort(Dirents(mergedDents))

	mergedTree, err := fsmgr.NewSeafdir(1, mergedDents)
	if err != nil {
		err := fmt.Errorf("failed to new seafdir: %v", err)
		return err
	}

	opt.mergedRoot = mergedTree.DirID

	// If the merge result is identical to either head, the object
	// already exists in the store and does not need to be saved again.
	if trees[1] != nil && trees[1].DirID == mergedTree.DirID ||
		trees[2] != nil && trees[2].DirID == mergedTree.DirID {
		return nil
	}

	err = fsmgr.SaveSeafdir(storeID, mergedTree)
	if err != nil {
		err := fmt.Errorf("failed to save merged tree %s/%s", storeID, baseDir)
		return err
	}

	return nil
}

// mergeEntries performs the three-way merge of a single file name:
// dents[0] is the base version, dents[1] the head, dents[2] the remote.
// Conflicting remote versions are kept under a generated conflict name.
func mergeEntries(storeID string, dents []*fsmgr.SeafDirent, baseDir string, opt *mergeOptions) ([]*fsmgr.SeafDirent, error) {
	var mergedDents []*fsmgr.SeafDirent
	n := 3
	files := make([]*fsmgr.SeafDirent, n)

	// Only consider regular-file entries; directory entries with the
	// same name are handled by mergeDirectories.
	for i := 0; i < n; i++ {
		if dents[i] != nil && !fsmgr.IsDir(dents[i].Mode) {
			files[i] = dents[i]
		}
	}

	base := files[0]
	head := files[1]
	remote := files[2]

	if head != nil && remote != nil {
		// Present on both sides: identical content, or one side
		// unchanged since base, merges cleanly; otherwise conflict.
		if head.ID == remote.ID {
			mergedDents = append(mergedDents, head)
		} else if base != nil && base.ID == head.ID {
			mergedDents = append(mergedDents, remote)
		} else if base != nil && base.ID == remote.ID {
			mergedDents = append(mergedDents, head)
		} else {
			// Both sides modified: keep head, keep remote renamed.
			conflictName, _ := mergeConflictFileName(storeID, opt, baseDir, head.Name)
			if conflictName == "" {
				err := fmt.Errorf("failed to generate conflict file name")
				return nil, err
			}
			dents[2].Name = conflictName
			mergedDents = append(mergedDents, head)
			mergedDents = append(mergedDents, remote)
			opt.conflict = true
		}
	} else if base != nil && head == nil && remote != nil {
		// Deleted (or replaced by a dir) in head, present in remote.
		if base.ID != remote.ID {
			if dents[1] != nil {
				// head turned the name into a directory: keep the
				// remote file under a conflict name.
				conflictName, _ := mergeConflictFileName(storeID, opt, baseDir, remote.Name)
				if conflictName == "" {
					err := fmt.Errorf("failed to generate conflict file name")
					return nil, err
				}
				dents[2].Name = conflictName
				mergedDents = append(mergedDents, remote)
				opt.conflict = true
			} else {
				mergedDents = append(mergedDents, remote)
			}
		}
	} else if base != nil && head != nil && remote == nil {
		// Deleted (or replaced by a dir) in remote, present in head.
		if base.ID != head.ID {
			if dents[2] != nil {
				conflictName, _ :=
					mergeConflictFileName(storeID, opt, baseDir, dents[2].Name)
				if conflictName == "" {
					err := fmt.Errorf("failed to generate conflict file name")
					return nil, err
				}
				// remote turned the name into a directory: rename that
				// directory entry and keep the head file.
				dents[2].Name = conflictName
				mergedDents = append(mergedDents, head)
				opt.conflict = true
			} else {
				mergedDents = append(mergedDents, head)
			}
		}
	} else if base == nil && head == nil && remote != nil {
		// New file added only on the remote side.
		if dents[1] == nil {
			mergedDents = append(mergedDents, remote)
		} else if dents[0] != nil && dents[0].ID == dents[1].ID {
			// head's directory entry is unchanged from base, so the
			// remote file wins cleanly.
			mergedDents = append(mergedDents, remote)
		} else {
			// head has a conflicting directory under the same name.
			conflictName, _ := mergeConflictFileName(storeID, opt, baseDir, remote.Name)
			if conflictName == "" {
				err := fmt.Errorf("failed to generate conflict file name")
				return nil, err
			}
			dents[2].Name = conflictName
			mergedDents = append(mergedDents, remote)
			opt.conflict = true
		}
	} else if base == nil && head != nil && remote == nil {
		// New file added only on the head side (mirror of the above).
		if dents[2] == nil {
			mergedDents = append(mergedDents, head)
		} else if dents[0] != nil && dents[0].ID == dents[2].ID {
			mergedDents = append(mergedDents, head)
		} else {
			conflictName, _ := mergeConflictFileName(storeID, opt, baseDir, dents[2].Name)
			if conflictName == "" {
				err := fmt.Errorf("failed to generate conflict file name")
				return nil, err
			}
			dents[2].Name = conflictName
			mergedDents = append(mergedDents, head)
			opt.conflict = true
		}
	}
	/* else if base != nil && head == nil && remote == nil {
	Don't need to add anything to mergeDents.
	}*/

	return mergedDents, nil
}

// mergeDirectories performs the three-way merge of a single directory
// name. dirMask has bit i set when dents[i] is a directory (bit 0 base,
// bit 1 head, bit 2 remote). Non-trivial combinations recurse into the
// subtrees via mergeTreesRecursive.
func mergeDirectories(storeID string, dents []*fsmgr.SeafDirent, baseDir string, opt *mergeOptions) ([]*fsmgr.SeafDirent, error) {
	var dirMask int
	var mergedDents []*fsmgr.SeafDirent
	var dirName string
	n := 3
	subDirs := make([]*fsmgr.SeafDir, n)

	for i := 0; i < n; i++ {
		if dents[i] != nil && fsmgr.IsDir(dents[i].Mode) {
			dirMask |= 1 << i
		}
	}

	switch dirMask {
	case 0:
		err := fmt.Errorf("no dirent for merge")
		return nil, err
	case 1:
		// Only in base: deleted on both sides — drop it.
		return mergedDents, nil
	case 2:
		// Added only in head.
		mergedDents = append(mergedDents, dents[1])
		return mergedDents, nil
	case 3:
		// In base and head: if head is unchanged, remote deleted it.
		if dents[0].ID == dents[1].ID {
			return mergedDents, nil
		}
	case 4:
		// Added only in remote.
		mergedDents = append(mergedDents, dents[2])
		return mergedDents, nil
	case 5:
		// In base and remote: if remote is unchanged, head deleted it.
		if dents[0].ID == dents[2].ID {
			return mergedDents, nil
		}
	case 6, 7:
		// In head and remote (and possibly base): take the trivially
		// equal/unchanged side when possible, otherwise recurse.
		if dents[1].ID == dents[2].ID {
			mergedDents = append(mergedDents, dents[1])
			return mergedDents, nil
		} else if dents[0] != nil && dents[0].ID == dents[1].ID {
			mergedDents = append(mergedDents, dents[2])
			return mergedDents, nil
		} else if dents[0] != nil && dents[0].ID == dents[2].ID {
			mergedDents = append(mergedDents, dents[1])
			return mergedDents, nil
		}
	default:
		err := fmt.Errorf("wrong dir mask for merge")
		return nil, err
	}

	for i := 0; i < n; i++ {
		subDirs[i] = nil
	}

	// Load each side's subtree for the recursive merge.
	for i := 0; i < n; i++ {
		if dents[i] != nil && fsmgr.IsDir(dents[i].Mode) {
			dir, err := fsmgr.GetSeafdir(storeID, dents[i].ID)
			if err != nil {
				err := fmt.Errorf("failed to get seafdir %s/%s", storeID, dents[i].ID)
				return nil, err
			}
			subDirs[i] = dir
			dirName = dents[i].Name
		}
	}

	newBaseDir := filepath.Join(baseDir, dirName)
	newBaseDir = newBaseDir + "/"
	err := mergeTreesRecursive(storeID, subDirs, newBaseDir, opt)
	if err != nil {
		err := fmt.Errorf("failed to merge trees: %v", err)
		return nil, err
	}

	// Re-point the surviving directory entry at the merged subtree.
	if dirMask == 3 || dirMask == 6 || dirMask == 7 {
		dent := dents[1]
		dent.ID = opt.mergedRoot
		mergedDents = append(mergedDents, dent)
	} else if dirMask == 5 {
		dent := dents[2]
		dent.ID = opt.mergedRoot
		mergedDents = append(mergedDents, dent)
	}
	return mergedDents, nil
}

// mergeConflictFileName builds the conflict-copy name for fileName under
// baseDir, using the remote modifier's nickname. If the modifier cannot
// be determined from the remote tree, the remote head commit's creator
// and the current time are used instead.
func mergeConflictFileName(storeID string, opt *mergeOptions, baseDir, fileName string) (string, error) {
	var modifier string
	var mtime int64
	filePath := filepath.Join(baseDir, fileName)
	modifier, mtime, err := getFileModifierMtime(opt.remoteRepoID, storeID, opt.remoteHead, filePath)
	if err != nil {
		// Fall back to the remote head commit's creator.
		commit, err := commitmgr.Load(opt.remoteRepoID, opt.remoteHead)
		if err != nil {
			err := fmt.Errorf("failed to get head commit")
			return "", err
		}
		modifier = commit.CreatorName
		mtime = time.Now().Unix()
	}

	nickname := getNickNameByModifier(opt.emailToNickname, modifier)

	conflictName := genConflictPath(fileName, nickname, mtime)

	return conflictName, nil
}

// genConflictPath derives "<name> (SFConflict <modifier> <time>).<ext>"
// from originPath. The extension is everything after the FIRST dot.
// NOTE(review): the mtime parameter is unused — the timestamp is always
// taken from time.Now(); confirm whether that is intended.
func genConflictPath(originPath, modifier string, mtime int64) string {
	var conflictPath string
	now := time.Now()
	timeBuf := now.Format("2006-Jan-2-15-04-05")
	dot := strings.Index(originPath, ".")
	if dot < 0 {
		if modifier != "" {
			conflictPath = fmt.Sprintf("%s (SFConflict %s %s)", originPath, modifier, timeBuf)
		} else {
			conflictPath = fmt.Sprintf("%s (SFConflict %s)", originPath, timeBuf)
		}
	} else {
		if modifier != "" {
			conflictPath = fmt.Sprintf("%s (SFConflict %s %s).%s",
				originPath, modifier, timeBuf, originPath[dot+1:])
		} else {
			conflictPath = fmt.Sprintf("%s (SFConflict %s).%s",
				originPath, timeBuf, originPath[dot+1:])
		}
	}
	return conflictPath
}

// getNickNameByModifier resolves a modifier email to a display nickname,
// consulting (and filling) the per-merge cache; the email itself is the
// fallback when no nickname can be fetched.
func getNickNameByModifier(emailToNickname map[string]string, modifier string) string {
	if modifier == "" {
		return ""
	}
	nickname, ok := emailToNickname[modifier]
	if ok {
		return nickname
	}
	// Only query seahub when a JWT key is configured for it.
	if option.JWTPrivateKey != "" {
		nickname = postGetNickName(modifier)
	}
	if nickname == "" {
		nickname = modifier
	}
	emailToNickname[modifier] = nickname
	return nickname
}

// postGetNickName asks the seahub user-list API for the nickname of the
// given user; any failure yields "" (best-effort lookup).
func postGetNickName(modifier string) string {
	tokenString, err := utils.GenSeahubJWTToken()
	if err != nil {
		return ""
	}
	header := map[string][]string{
		"Authorization": {"Token " + tokenString},
	}
	data, err := json.Marshal(map[string]interface{}{
		"user_id_list": []string{modifier},
	})
	if err != nil {
		return ""
	}
	url := option.SeahubURL + "/user-list/"
	status, body, err := utils.HttpCommon("POST", url, header, bytes.NewReader(data))
	if err != nil {
		return ""
	}
	if status != http.StatusOK {
		return ""
	}
	results := make(map[string]interface{})
	err = json.Unmarshal(body, &results)
	if err != nil {
		return ""
	}
	userList, ok := results["user_list"].([]interface{})
	if !ok {
		return ""
	}
	// Take the first non-empty "name" field from the returned users.
	nickname := ""
	for _, element := range userList {
		list, ok := element.(map[string]interface{})
		if !ok {
			continue
		}
		nickname, _ = list["name"].(string)
		if nickname != "" {
			break
		}
	}

	return nickname
}

// getFileModifierMtime returns the last modifier and mtime of filePath
// as recorded in the tree of the given head commit.
func getFileModifierMtime(repoID, storeID, head, filePath string) (string, int64, error) {
	commit, err := commitmgr.Load(repoID, head)
	if err != nil {
		err := fmt.Errorf("failed to get head commit")
		return "", -1, err
	}

	parent := filepath.Dir(filePath)
	if parent == "." {
		parent = ""
	}
	fileName := filepath.Base(filePath)
	dir, err := fsmgr.GetSeafdirByPath(storeID, commit.RootID, parent)
	if err != nil {
		err := fmt.Errorf("dir %s doesn't exist in repo %s", parent, repoID)
		return "", -1, err
	}

	var dent *fsmgr.SeafDirent
	entries := dir.Entries
	for _, d := range entries {
		if d.Name == fileName {
			dent = d
			break
		}
	}

	if dent == nil {
		err := fmt.Errorf("file %s doesn't exist in repo %s", fileName, repoID)
		return "", -1, err
	}

	return dent.Modifier, dent.Mtime, nil
}


================================================
FILE: fileserver/merge_test.go
================================================
package main

import (
	"fmt"
	"os"
	"syscall"
	"testing"

	"github.com/haiwen/seafile-server/fileserver/commitmgr"
	"github.com/haiwen/seafile-server/fileserver/fsmgr"
	"github.com/haiwen/seafile-server/fileserver/option"
)

const (
	mergeTestCommitID        = "0401fc662e3bc87a41f299a907c056aaf8322a27"
	mergeTestRepoID          = "b1f2ad61-9164-418a-a47f-ab805dbd5694"
	mergeTestSeafileConfPath = "/tmp/conf"
	mergeTestSeafileDataDir  = "/tmp/conf/seafile-data"
)

// Root directory IDs of the fixture trees built by mergeTestCreateTestDir.
var mergeTestTree1 string
var mergeTestTree2 string
var mergeTestTree3 string
var
mergeTestTree4 string
var mergeTestTree5 string

// Commit IDs corresponding to the fixture trees above.
var mergeTestTree1CommitID string
var mergeTestTree2CommitID string
var mergeTestTree3CommitID string
var mergeTestTree4CommitID string

/* test directory structure:
tree1
|--bbb
   |-- testfile(size:1)
tree2
|--bbb
   |-- testfile(size:10)
tree3
|--bbb
tree4
|--bbb
   |-- testfile(size:100)
tree5
|--
*/

// mergeTestCreateTestDir materializes the five fixture trees above (plus
// a commit for trees 1-4) in the fs/commit stores.
func mergeTestCreateTestDir() error {
	modeDir := uint32(syscall.S_IFDIR | 0644)
	modeFile := uint32(syscall.S_IFREG | 0644)

	// tree5: empty root.
	emptyDir, err := mergeTestCreateSeafdir(nil)
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	mergeTestTree5 = emptyDir

	// tree1: bbb/testfile with size 1.
	file1, err := fsmgr.NewSeafile(1, 1, []string{"4f616f98d6a264f75abffe1bc150019c880be239"})
	if err != nil {
		err := fmt.Errorf("failed to new seafile: %v", err)
		return err
	}
	err = fsmgr.SaveSeafile(mergeTestRepoID, file1)
	if err != nil {
		err := fmt.Errorf("failed to save seafile: %v", err)
		return err
	}
	dent1 := fsmgr.SeafDirent{ID: file1.FileID, Name: "testfile", Mode: modeFile, Size: 1}
	dir1, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent1})
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	dent2 := fsmgr.SeafDirent{ID: dir1, Name: "bbb", Mode: modeDir}
	dir2, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent2})
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	mergeTestTree1 = dir2
	commit1 := commitmgr.NewCommit(mergeTestRepoID, "", mergeTestTree1, "seafile", "this is the first commit.\n")
	err = commitmgr.Save(commit1)
	if err != nil {
		err := fmt.Errorf("failed to save commit: %v", err)
		return err
	}
	mergeTestTree1CommitID = commit1.CommitID

	// tree2: bbb/testfile with size 10.
	file2, err := fsmgr.NewSeafile(1, 10, []string{"4f616f98d6a264f75abffe1bc150019c880be239"})
	if err != nil {
		err := fmt.Errorf("failed to new seafile: %v", err)
		return err
	}
	err = fsmgr.SaveSeafile(mergeTestRepoID, file2)
	if err != nil {
		err := fmt.Errorf("failed to save seafile: %v", err)
		return err
	}
	dent3 := fsmgr.SeafDirent{ID: file2.FileID, Name: "testfile", Mode: modeFile, Size: 10}
	dir3, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent3})
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	dent4 := fsmgr.SeafDirent{ID: dir3, Name: "bbb", Mode: modeDir}
	dir4, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent4})
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	mergeTestTree2 = dir4
	commit2 := commitmgr.NewCommit(mergeTestRepoID, "", mergeTestTree2, "seafile", "this is the second commit.\n")
	err = commitmgr.Save(commit2)
	if err != nil {
		err := fmt.Errorf("failed to save commit: %v", err)
		return err
	}
	mergeTestTree2CommitID = commit2.CommitID

	// tree3: empty bbb directory only.
	dir5, err := mergeTestCreateSeafdir(nil)
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	dent6 := fsmgr.SeafDirent{ID: dir5, Name: "bbb", Mode: modeDir}
	dir6, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent6})
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	mergeTestTree3 = dir6
	commit3 := commitmgr.NewCommit(mergeTestRepoID, "", mergeTestTree3, "seafile", "this is the third commit.\n")
	err = commitmgr.Save(commit3)
	if err != nil {
		err := fmt.Errorf("failed to save commit: %v", err)
		return err
	}
	mergeTestTree3CommitID = commit3.CommitID

	// tree4: bbb/testfile with size 100 and a different block.
	file3, err := fsmgr.NewSeafile(1, 100, []string{"4f616f98d6a264f75abffe1bc150019c880be240"})
	if err != nil {
		err := fmt.Errorf("failed to new seafile: %v", err)
		return err
	}
	err = fsmgr.SaveSeafile(mergeTestRepoID, file3)
	if err != nil {
		err := fmt.Errorf("failed to save seafile: %v", err)
		return err
	}
	dent7 := fsmgr.SeafDirent{ID: file3.FileID, Name: "testfile", Mode: modeFile, Size: 100}
	dir7, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent7})
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	dent8 := fsmgr.SeafDirent{ID: dir7, Name: "bbb", Mode: modeDir}
	dir8, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent8})
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	mergeTestTree4 = dir8
	// NOTE(review): commit4 is created from mergeTestTree3, not
	// mergeTestTree4 — looks like a copy/paste slip; confirm whether the
	// tests rely on it before changing.
	commit4 := commitmgr.NewCommit(mergeTestRepoID, "", mergeTestTree3, "seafile", "this is the fourth commit.\n")
	err = commitmgr.Save(commit4)
	if err != nil {
		err := fmt.Errorf("failed to save commit: %v", err)
		return err
	}
	mergeTestTree4CommitID = commit4.CommitID

	return nil
}

// mergeTestCreateSeafdir saves a seafdir with the given entries and
// returns its ID.
func mergeTestCreateSeafdir(dents []*fsmgr.SeafDirent) (string, error) {
	seafdir, err := fsmgr.NewSeafdir(1, dents)
	if err != nil {
		err := fmt.Errorf("failed to new seafdir: %v", err)
		return "", err
	}
	err = fsmgr.SaveSeafdir(mergeTestRepoID, seafdir)
	if err != nil {
		return "", err
	}
	return seafdir.DirID, nil
}

// mergeTestDelFile removes the on-disk fixtures.
func mergeTestDelFile() error {
	err := os.RemoveAll(mergeTestSeafileConfPath)
	if err != nil {
		return err
	}
	return nil
}

// TestMergeTrees drives all merge sub-tests against the fixture trees.
func TestMergeTrees(t *testing.T) {
	commitmgr.Init(mergeTestSeafileConfPath, mergeTestSeafileDataDir)
	fsmgr.Init(mergeTestSeafileConfPath, mergeTestSeafileDataDir, option.FsCacheLimit)
	err := mergeTestCreateTestDir()
	if err != nil {
		fmt.Printf("failed to create test dir: %v", err)
		os.Exit(1)
	}
	t.Run("test1", testMergeTrees1)
	t.Run("test2", testMergeTrees2)
	t.Run("test3", testMergeTrees3)
	t.Run("test4", testMergeTrees4)
	t.Run("test5", testMergeTrees5)
	t.Run("test6", testMergeTrees6)
	t.Run("test7", testMergeTrees7)
	t.Run("test8", testMergeTrees8)
	t.Run("test9", testMergeTrees9)
	t.Run("test10", testMergeTrees10)
	t.Run("test11", testMergeTrees11)
	t.Run("test12", testMergeTrees12)

	err = mergeTestDelFile()
	if err != nil {
		fmt.Printf("failed to remove test file : %v", err)
		os.Exit(1)
	}
}

// head add file
func testMergeTrees1(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID)
	if err != nil {
		t.Errorf("failed to load commit.\n")
	}
	roots := []string{mergeTestTree3, mergeTestTree2, mergeTestTree3}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	err = mergeTrees(mergeTestRepoID, roots, opt)
	if err != nil {
		t.Errorf("failed to merge.\n")
	}
	if opt.mergedRoot != mergeTestTree2 {
		t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
	}
}

// remote add file
func testMergeTrees2(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID)
	if err != nil {
		t.Errorf("failed to load commit.\n")
	}
	roots := []string{mergeTestTree3, mergeTestTree3, mergeTestTree2}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	err = mergeTrees(mergeTestRepoID, roots, opt)
	if err != nil {
		t.Errorf("failed to merge.\n")
	}
	if opt.mergedRoot != mergeTestTree2 {
		t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
	}
}

// head modify file
func testMergeTrees3(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
	if err != nil {
		t.Errorf("failed to load commit.\n")
	}
	roots := []string{mergeTestTree1, mergeTestTree2, mergeTestTree1}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	err = mergeTrees(mergeTestRepoID, roots, opt)
	if err != nil {
		t.Errorf("failed to merge.\n")
	}
	if opt.mergedRoot != mergeTestTree2 {
		t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
	}
}

// remote modify file
func testMergeTrees4(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
	if err != nil {
		t.Errorf("failed to load commit.\n")
	}
	roots := []string{mergeTestTree1, mergeTestTree1, mergeTestTree2}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	err = mergeTrees(mergeTestRepoID, roots, opt)
	if err != nil {
		t.Errorf("failed to merge.\n")
	}
	if opt.mergedRoot != mergeTestTree2 {
		t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
	}
}

// head and remote add file
func testMergeTrees5(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID)
	if err != nil {
		t.Errorf("failed to load commit.\n")
	}
	roots := []string{mergeTestTree3, mergeTestTree1, mergeTestTree2}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	err = mergeTrees(mergeTestRepoID, roots, opt)
	if err != nil {
		t.Errorf("failed to merge.\n")
	}
	// Both sides added different content: a conflict is expected.
	if !opt.conflict {
		t.Errorf("merge error %s.\n", opt.mergedRoot)
	}
}

// head and remote modify file
func testMergeTrees6(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
	if err != nil {
		t.Errorf("failed to load commit.\n")
	}
	roots := []string{mergeTestTree1, mergeTestTree2, mergeTestTree4}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	err = mergeTrees(mergeTestRepoID, roots, opt)
	if err != nil {
		t.Errorf("failed to merge.\n")
	}
	if !opt.conflict {
		t.Errorf("merge error %s.\n", opt.mergedRoot)
	}
}

// head modify file and remote delete file
func testMergeTrees7(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
	if err != nil {
		t.Errorf("failed to load commit.\n")
	}
	roots := []string{mergeTestTree1, mergeTestTree2, mergeTestTree3}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	err = mergeTrees(mergeTestRepoID, roots, opt)
	if err != nil {
		t.Errorf("failed to merge.\n")
	}
	if opt.mergedRoot != mergeTestTree2 {
		t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
	}
}

// head delete file and remote modify file
func testMergeTrees8(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
	if err != nil {
		t.Errorf("failed to load commit.\n")
	}
	roots := []string{mergeTestTree1, mergeTestTree3, mergeTestTree2}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	err = mergeTrees(mergeTestRepoID, roots, opt)
	if err != nil {
		t.Errorf("failed to merge.\n")
	}
	if opt.mergedRoot != mergeTestTree2 {
		t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
	}
}

// head modify file and remote delete dir of this file
func testMergeTrees9(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
	if err != nil {
		t.Errorf("failed to load commit.\n")
	}
	roots := []string{mergeTestTree1, mergeTestTree2, mergeTestTree5}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	err = mergeTrees(mergeTestRepoID, roots, opt)
	if err != nil {
		t.Errorf("failed to merge.\n")
	}
	if opt.mergedRoot != mergeTestTree2 {
		t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
	}
}

// remote modify file and head delete dir of this file
func testMergeTrees10(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
	if err != nil {
		t.Errorf("failed to load commit.\n")
	}
	roots := []string{mergeTestTree1, mergeTestTree5, mergeTestTree2}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	err = mergeTrees(mergeTestRepoID, roots, opt)
	if err != nil {
		t.Errorf("failed to merge.\n")
	}
	if opt.mergedRoot != mergeTestTree2 {
		t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
	}
}

// head add file and remote delete dir of this file
func testMergeTrees11(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID)
	if err != nil {
		t.Errorf("failed to load commit.\n")
	}
	roots := []string{mergeTestTree3, mergeTestTree1, mergeTestTree5}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	err = mergeTrees(mergeTestRepoID, roots, opt)
	if err != nil {
		t.Errorf("failed to merge.\n")
	}
	if opt.mergedRoot != mergeTestTree1 {
		t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree1)
	}
}

// remote add file and head delete dir of this file
func testMergeTrees12(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID)
	if err != nil {
		t.Errorf("failed to load commit.\n")
	}
	roots := []string{mergeTestTree3, mergeTestTree5, mergeTestTree1}
	opt := new(mergeOptions)
	opt.remoteRepoID
package metrics

import (
	"container/list"
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"runtime/debug"
	"sync"
	"time"

	"github.com/dgraph-io/ristretto/z"
	"github.com/go-redis/redis/v8"
	"github.com/haiwen/seafile-server/fileserver/option"
	log "github.com/sirupsen/logrus"
)

const (
	// RedisChannel is the pub/sub channel metric messages are published to.
	RedisChannel = "metric_channel"
	// ComponentName identifies this process in published metric messages.
	ComponentName = "go_fileserver"
	// MetricInterval is how often metrics are published.
	MetricInterval = 30 * time.Second
)

// MetricMgr tracks the HTTP requests currently being served.
// The embedded mutex guards inFlightRequestList.
type MetricMgr struct {
	sync.Mutex
	// inFlightRequestList holds one *RequestInfo per in-flight request.
	inFlightRequestList *list.List
}

// RequestInfo records one in-flight HTTP request.
type RequestInfo struct {
	urlPath string
	method  string
	start   time.Time
}

// AddReq registers the start of a request and returns the list element,
// which the caller must hand back to DecReq when the request finishes.
func (m *MetricMgr) AddReq(urlPath, method string) *list.Element {
	req := new(RequestInfo)
	req.urlPath = urlPath
	req.method = method
	req.start = time.Now()
	m.Lock()
	defer m.Unlock()
	e := m.inFlightRequestList.PushBack(req)
	return e
}

// DecReq unregisters a request previously added with AddReq.
func (m *MetricMgr) DecReq(e *list.Element) {
	m.Lock()
	defer m.Unlock()
	m.inFlightRequestList.Remove(e)
}

var (
	client    *redis.Client
	closer    *z.Closer
	metricMgr *MetricMgr
)

// Init starts the background metrics publisher.
// It is a no-op unless redis options are configured.
func Init() {
	if !option.HasRedisOptions {
		return
	}
	metricMgr = new(MetricMgr)
	metricMgr.inFlightRequestList = list.New()
	closer = z.NewCloser(1)
	go metricsHandler()
}

// Stop signals the publisher goroutine and waits for it to exit.
// No-op when Init never started one.
func Stop() {
	if !option.HasRedisOptions {
		return
	}
	closer.SignalAndWait()
}

// metricsHandler runs in its own goroutine: it connects to redis and
// publishes metrics every MetricInterval until Stop is called.
func metricsHandler() {
	defer closer.Done()
	defer func() {
		// Don't let a panic in the publisher kill the whole process.
		if err := recover(); err != nil {
			log.Errorf("panic: %v\n%s", err, debug.Stack())
		}
	}()
	server := fmt.Sprintf("%s:%d", option.RedisHost, option.RedisPort)
	opt := &redis.Options{
		Addr:     server,
		Password: option.RedisPasswd,
	}
	// One connection is enough for the periodic publish.
	opt.PoolSize = 1
	client = redis.NewClient(opt)
	ticker := time.NewTicker(MetricInterval)
	defer ticker.Stop()
	for {
		select {
		case <-closer.HasBeenClosed():
			return
		case <-ticker.C:
			err := publishMetrics()
			if err != nil {
				log.Warnf("Failed to publish metrics to redis channel: %v", err)
				continue
			}
		}
	}
}

// MetricMiddleware counts the wrapped handler's requests as in-flight
// for the duration of each call.
// NOTE(review): metricMgr is nil when Init was skipped (no redis options);
// installing this middleware in that case would panic — confirm callers
// only install it when metrics are enabled.
func MetricMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		req := metricMgr.AddReq(r.URL.Path, r.Method)
		next.ServeHTTP(w, r)
		metricMgr.DecReq(req)
	})
}

// MetricMessage is the JSON payload published to RedisChannel.
type MetricMessage struct {
	MetricName    string `json:"metric_name"`
	MetricValue   any    `json:"metric_value"`
	MetricType    string `json:"metric_type"`
	ComponentName string `json:"component_name"`
	MetricHelp    string `json:"metric_help"`
	NodeName      string `json:"node_name"`
}

// publishMetrics snapshots the in-flight request count under the lock and
// publishes it as a gauge metric.
func publishMetrics() error {
	metricMgr.Lock()
	inFlightRequestCount := metricMgr.inFlightRequestList.Len()
	metricMgr.Unlock()
	msg := &MetricMessage{MetricName: "in_flight_request_total",
		MetricValue:   inFlightRequestCount,
		MetricType:    "gauge",
		ComponentName: ComponentName,
		MetricHelp:    "The number of currently running http requests.",
		NodeName:      option.NodeName,
	}
	data, err := json.Marshal(msg)
	if err != nil {
		return err
	}
	err = publishRedisMsg(RedisChannel, data)
	if err != nil {
		return err
	}
	return nil
}

// publishRedisMsg publishes msg to the given redis channel with a
// 5-second timeout.
func publishRedisMsg(channel string, msg []byte) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	err := client.Publish(ctx, channel, msg).Err()
	if err != nil {
		return fmt.Errorf("failed to publish redis message: %w", err)
	}
	return nil
}
// Implementation of file system storage backend.
package objstore

import (
	"io"
	"os"
	"path"
)

// fsBackend stores each object as a file at
// <objDir>/<repoID>/<objID[:2]>/<objID[2:]>.
type fsBackend struct {
	// Path of the object directory
	objDir  string
	objType string
	tmpDir  string
}

// newFSBackend creates the object directory for objType (and the shared
// tmpfiles directory) under seafileDataDir and returns the backend.
func newFSBackend(seafileDataDir string, objType string) (*fsBackend, error) {
	objDir := path.Join(seafileDataDir, "storage", objType)
	err := os.MkdirAll(objDir, os.ModePerm)
	if err != nil {
		return nil, err
	}
	tmpDir := path.Join(seafileDataDir, "tmpfiles")
	err = os.MkdirAll(tmpDir, os.ModePerm)
	if err != nil {
		return nil, err
	}
	backend := new(fsBackend)
	backend.objDir = objDir
	backend.objType = objType
	backend.tmpDir = tmpDir
	return backend, nil
}

// read copies the object's contents into w.
func (b *fsBackend) read(repoID string, objID string, w io.Writer) error {
	p := path.Join(b.objDir, repoID, objID[:2], objID[2:])
	fd, err := os.Open(p)
	if err != nil {
		return err
	}
	defer fd.Close()
	_, err = io.Copy(w, fd)
	if err != nil {
		return err
	}
	return nil
}

// write stores the contents of r as the object, writing to a temp file
// first and renaming it into place so readers never see a partial object.
// NOTE(review): the sync flag is currently ignored (no fsync) — confirm
// whether durability is required here.
func (b *fsBackend) write(repoID string, objID string, r io.Reader, sync bool) error {
	parentDir := path.Join(b.objDir, repoID, objID[:2])
	p := path.Join(parentDir, objID[2:])
	err := os.MkdirAll(parentDir, os.ModePerm)
	if err != nil {
		return err
	}
	// Non-block objects stage the temp file in the final directory so the
	// rename stays within one directory (atomic on POSIX filesystems).
	tmpDir := b.tmpDir
	if b.objType != "blocks" {
		tmpDir = parentDir
	}
	tFile, err := os.CreateTemp(tmpDir, objID+".*")
	if err != nil {
		return err
	}
	success := false
	defer func() {
		// Remove the temp file on any failure path.
		if !success {
			os.Remove(tFile.Name())
		}
	}()
	_, err = io.Copy(tFile, r)
	if err != nil {
		tFile.Close()
		return err
	}
	err = tFile.Close()
	if err != nil {
		return err
	}
	err = os.Rename(tFile.Name(), p)
	if err != nil {
		return err
	}
	success = true
	return nil
}

// exists reports whether the object file is present on disk.
// Fixed: a missing object now reports (false, nil) instead of propagating
// the not-exist error, and any other stat error reports (false, err)
// instead of wrongly claiming the object exists.
func (b *fsBackend) exists(repoID string, objID string) (bool, error) {
	p := path.Join(b.objDir, repoID, objID[:2], objID[2:])
	_, err := os.Stat(p)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}
	return true, nil
}

// stat returns the object's size in bytes, or -1 with an error.
func (b *fsBackend) stat(repoID string, objID string) (int64, error) {
	p := path.Join(b.objDir, repoID, objID[:2], objID[2:])
	fileInfo, err := os.Stat(p)
	if err != nil {
		return -1, err
	}
	return fileInfo.Size(), nil
}
// Package objstore provides operations for commit, fs and block objects.
// It is low-level package used by commitmgr, fsmgr, blockmgr packages to access storage.
package objstore

import (
	"io"
)

// ObjectStore is a container to access storage backend
type ObjectStore struct {
	// can be "commit", "fs", or "block"
	ObjType string
	backend storageBackend
}

// storageBackend is the interface implemented by storage backends.
// An object store may have one or multiple storage backends.
type storageBackend interface {
	// Read an object from backend and write the contents into w.
	read(repoID string, objID string, w io.Writer) (err error)
	// Write the contents from r to the object.
	write(repoID string, objID string, r io.Reader, sync bool) (err error)
	// exists checks whether an object exists.
	exists(repoID string, objID string) (res bool, err error)
	// stat calculates an object's size
	stat(repoID string, objID string) (res int64, err error)
}

// New returns a new object store for a given type of objects.
// objType can be "commit", "fs", or "block".
// NOTE(review): the error from newFSBackend is silently discarded; if the
// storage directories cannot be created the store is left with a nil
// backend and later calls will panic — confirm whether this should be
// fatal at startup instead.
func New(seafileConfPath string, seafileDataDir string, objType string) *ObjectStore {
	obj := new(ObjectStore)
	obj.ObjType = objType
	obj.backend, _ = newFSBackend(seafileDataDir, objType)
	return obj
}

// Read data from storage backends.
func (s *ObjectStore) Read(repoID string, objID string, w io.Writer) (err error) {
	return s.backend.read(repoID, objID, w)
}

// Write data to storage backends.
func (s *ObjectStore) Write(repoID string, objID string, r io.Reader, sync bool) (err error) {
	return s.backend.write(repoID, objID, r, sync)
}

// Check whether object exists.
func (s *ObjectStore) Exists(repoID string, objID string) (res bool, err error) {
	return s.backend.exists(repoID, objID)
}
func (s *ObjectStore) Stat(repoID string, objID string) (res int64, err error) { return s.backend.stat(repoID, objID) } ================================================ FILE: fileserver/objstore/objstore_test.go ================================================ package objstore import ( "fmt" "os" "path" "testing" ) const ( testFile = "output.data" seafileConfPath = "/tmp/conf" seafileDataDir = "/tmp/conf/seafile-data" repoID = "b1f2ad61-9164-418a-a47f-ab805dbd5694" objID = "0401fc662e3bc87a41f299a907c056aaf8322a27" ) func createFile() error { outputFile, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) if err != nil { return err } defer outputFile.Close() outputString := "hello world!\n" for i := 0; i < 10; i++ { outputFile.WriteString(outputString) } return nil } func delFile() error { err := os.Remove(testFile) if err != nil { return err } err = os.RemoveAll(seafileConfPath) if err != nil { return err } return nil } func TestMain(m *testing.M) { err := createFile() if err != nil { fmt.Printf("Failed to create test file : %v\n", err) os.Exit(1) } code := m.Run() err = delFile() if err != nil { fmt.Printf("Failed to remove test file : %v\n", err) os.Exit(1) } os.Exit(code) } func testWrite(t *testing.T) { inputFile, err := os.Open(testFile) if err != nil { t.Errorf("Failed to open test file : %v\n", err) } defer inputFile.Close() bend := New(seafileConfPath, seafileDataDir, "commit") bend.Write(repoID, objID, inputFile, true) } func testRead(t *testing.T) { outputFile, err := os.OpenFile(testFile, os.O_WRONLY, 0666) if err != nil { t.Errorf("Failed to open test file:%v\n", err) } defer outputFile.Close() bend := New(seafileConfPath, seafileDataDir, "commit") err = bend.Read(repoID, objID, outputFile) if err != nil { t.Errorf("Failed to read backend : %s\n", err) } } func testExists(t *testing.T) { bend := New(seafileConfPath, seafileDataDir, "commit") ret, _ := bend.Exists(repoID, objID) if !ret { t.Errorf("File is not exist\n") } filePath := 
package option

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	log "github.com/sirupsen/logrus"
	"gopkg.in/ini.v1"
)

// InfiniteQuota indicates that the quota is unlimited.
const InfiniteQuota = -2

// Storage unit.
const (
	KB = 1000
	MB = 1000000
	GB = 1000000000
	TB = 1000000000000
)

var (
	// fileserver options
	Host                   string
	Port                   uint32
	MaxUploadSize          uint64
	FsIdListRequestTimeout int64
	// Block size for indexing uploaded files
	FixedBlockSize uint64
	// Maximum number of goroutines to index uploaded files
	MaxIndexingThreads uint32
	WebTokenExpireTime uint32
	// File mode for temp files
	ClusterSharedTempFileMode uint32
	WindowsEncoding           string
	SkipBlockHash             bool
	FsCacheLimit              int64
	VerifyClientBlocks        bool
	MaxIndexingFiles          uint32

	// general options
	CloudMode bool

	// notification server
	EnableNotification bool
	NotificationURL    string

	// GROUP options
	GroupTableName string

	// quota options
	DefaultQuota int64

	// redis options
	HasRedisOptions bool
	RedisHost       string
	RedisPasswd     string
	RedisPort       uint32
	RedisExpiry     uint32
	RedisMaxConn    uint32
	RedisTimeout    time.Duration

	// Profile password
	ProfilePassword string
	EnableProfiling bool

	// Go log level
	LogLevel string

	// DB default timeout
	DBOpTimeout time.Duration

	// database
	DBType string

	// seahub
	SeahubURL     string
	JWTPrivateKey string

	// metric
	NodeName string
)

// DBOption holds the database connection settings assembled from
// seafile.conf and SEAFILE_MYSQL_DB_* environment variables.
type DBOption struct {
	User          string
	Password      string
	Host          string
	Port          int
	CcnetDbName   string
	SeafileDbName string
	CaPath        string
	UseTLS        bool
	SkipVerify    bool
	Charset       string
	DBEngine      string
}

// initDefaultOptions resets all options to their built-in defaults;
// called before the config file is parsed so unset keys keep these values.
func initDefaultOptions() {
	Host = "0.0.0.0"
	Port = 8082
	FixedBlockSize = 1 << 23
	MaxIndexingThreads = 1
	WebTokenExpireTime = 7200
	ClusterSharedTempFileMode = 0600
	DefaultQuota = InfiniteQuota
	FsCacheLimit = 4 << 30
	VerifyClientBlocks = true
	FsIdListRequestTimeout = -1
	DBOpTimeout = 60 * time.Second
	RedisHost = "127.0.0.1"
	RedisPort = 6379
	RedisExpiry = 24 * 3600
	RedisMaxConn = 100
	RedisTimeout = 1 * time.Second
	MaxIndexingFiles = 10
}

// LoadFileServerOptions loads fileserver options from
// <centralDir>/seafile.conf and the environment. Missing or malformed
// keys fall back to the defaults set by initDefaultOptions.
func LoadFileServerOptions(centralDir string) {
	initDefaultOptions()
	seafileConfPath := filepath.Join(centralDir, "seafile.conf")
	opts := ini.LoadOptions{}
	opts.SpaceBeforeInlineComment = true
	config, err := ini.LoadSources(opts, seafileConfPath)
	if err != nil {
		log.Fatalf("Failed to load seafile.conf: %v", err)
	}
	CloudMode = false
	if section, err := config.GetSection("general"); err == nil {
		if key, err := section.GetKey("cloud_mode"); err == nil {
			CloudMode, _ = key.Bool()
		}
	}
	// Notification server is enabled only when both env vars are set.
	notifServer := os.Getenv("INNER_NOTIFICATION_SERVER_URL")
	enableNotifServer := os.Getenv("ENABLE_NOTIFICATION_SERVER")
	if notifServer != "" && enableNotifServer == "true" {
		NotificationURL = notifServer
		EnableNotification = true
	}
	// Legacy [httpserver] section is honored first, then [fileserver].
	if section, err := config.GetSection("httpserver"); err == nil {
		parseFileServerSection(section)
	}
	if section, err := config.GetSection("fileserver"); err == nil {
		parseFileServerSection(section)
	}
	if section, err := config.GetSection("quota"); err == nil {
		if key, err := section.GetKey("default"); err == nil {
			quotaStr := key.String()
			DefaultQuota = parseQuota(quotaStr)
		}
	}
	loadCacheOptionFromEnv()
	GroupTableName = os.Getenv("SEAFILE_MYSQL_DB_GROUP_TABLE_NAME")
	if GroupTableName == "" {
		GroupTableName = "Group"
	}
	NodeName = os.Getenv("NODE_NAME")
	if NodeName == "" {
		NodeName = "default"
	}
}
key.Uint() if err == nil { MaxUploadSize = uint64(size) * 1000000 } } if key, err := section.GetKey("max_indexing_threads"); err == nil { threads, err := key.Uint() if err == nil { MaxIndexingThreads = uint32(threads) } } if key, err := section.GetKey("fixed_block_size"); err == nil { blkSize, err := key.Uint64() if err == nil { FixedBlockSize = blkSize * (1 << 20) } } if key, err := section.GetKey("web_token_expire_time"); err == nil { expire, err := key.Uint() if err == nil { WebTokenExpireTime = uint32(expire) } } if key, err := section.GetKey("cluster_shared_temp_file_mode"); err == nil { fileMode, err := key.Uint() if err == nil { ClusterSharedTempFileMode = uint32(fileMode) } } if key, err := section.GetKey("enable_profiling"); err == nil { EnableProfiling, _ = key.Bool() } if EnableProfiling { if key, err := section.GetKey("profile_password"); err == nil { ProfilePassword = key.String() } else { log.Fatal("password of profiling must be specified.") } } if key, err := section.GetKey("go_log_level"); err == nil { LogLevel = key.String() } if key, err := section.GetKey("fs_cache_limit"); err == nil { fsCacheLimit, err := key.Int64() if err == nil { FsCacheLimit = fsCacheLimit * 1024 * 1024 } } // The ratio of physical memory consumption and fs objects is about 4:1, // and this part of memory is generally not subject to GC. So the value is // divided by 4. 
FsCacheLimit = FsCacheLimit / 4 if key, err := section.GetKey("fs_id_list_request_timeout"); err == nil { fsIdListRequestTimeout, err := key.Int64() if err == nil { FsIdListRequestTimeout = fsIdListRequestTimeout } } if key, err := section.GetKey("verify_client_blocks_after_sync"); err == nil { VerifyClientBlocks, _ = key.Bool() } if key, err := section.GetKey("max_indexing_files"); err == nil { threads, err := key.Uint() if err == nil && threads > 0 { MaxIndexingFiles = uint32(threads) } } } func parseQuota(quotaStr string) int64 { var quota int64 var multiplier int64 = GB if end := strings.Index(quotaStr, "kb"); end > 0 { multiplier = KB quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0) if err != nil { return InfiniteQuota } quota = quotaInt * multiplier } else if end := strings.Index(quotaStr, "mb"); end > 0 { multiplier = MB quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0) if err != nil { return InfiniteQuota } quota = quotaInt * multiplier } else if end := strings.Index(quotaStr, "gb"); end > 0 { multiplier = GB quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0) if err != nil { return InfiniteQuota } quota = quotaInt * multiplier } else if end := strings.Index(quotaStr, "tb"); end > 0 { multiplier = TB quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0) if err != nil { return InfiniteQuota } quota = quotaInt * multiplier } else { quotaInt, err := strconv.ParseInt(quotaStr, 10, 0) if err != nil { return InfiniteQuota } quota = quotaInt * multiplier } return quota } func loadCacheOptionFromEnv() { cacheProvider := os.Getenv("CACHE_PROVIDER") if cacheProvider != "redis" { return } HasRedisOptions = true redisHost := os.Getenv("REDIS_HOST") if redisHost != "" { RedisHost = redisHost } redisPort := os.Getenv("REDIS_PORT") if redisPort != "" { port, err := strconv.ParseUint(redisPort, 10, 32) if err != nil { RedisPort = uint32(port) } } redisPasswd := os.Getenv("REDIS_PASSWORD") if redisPasswd != "" { RedisPasswd = redisPasswd } 
// LoadSeahubConfig reads the seahub-related settings (JWT key and
// internal API URL) from the environment. Returns an error when
// JWT_PRIVATE_KEY is unset.
func LoadSeahubConfig() error {
	JWTPrivateKey = os.Getenv("JWT_PRIVATE_KEY")
	if JWTPrivateKey == "" {
		return fmt.Errorf("failed to read JWT_PRIVATE_KEY")
	}
	siteRoot := os.Getenv("SITE_ROOT")
	if siteRoot != "" {
		SeahubURL = fmt.Sprintf("http://127.0.0.1:8000%sapi/v2.1/internal", siteRoot)
	} else {
		SeahubURL = "http://127.0.0.1:8000/api/v2.1/internal"
	}
	return nil
}

// LoadDBOption assembles the database options: seafile.conf is read
// first, then SEAFILE_MYSQL_DB_* environment variables override it.
// Host, user and password are required.
func LoadDBOption(centralDir string) (*DBOption, error) {
	dbOpt, err := loadDBOptionFromFile(centralDir)
	if err != nil {
		// Config file problems are not fatal — env vars may supply everything.
		log.Warnf("failed to load database config: %v", err)
	}
	dbOpt = loadDBOptionFromEnv(dbOpt)
	if dbOpt.Host == "" {
		return nil, fmt.Errorf("no database host in seafile.conf.")
	}
	if dbOpt.User == "" {
		return nil, fmt.Errorf("no database user in seafile.conf.")
	}
	if dbOpt.Password == "" {
		return nil, fmt.Errorf("no database password in seafile.conf.")
	}
	DBType = dbOpt.DBEngine
	return dbOpt, nil
}

// loadDBOptionFromFile parses the [database] section of seafile.conf.
// A missing section yields defaults (mysql engine); any engine other than
// mysql is an error.
func loadDBOptionFromFile(centralDir string) (*DBOption, error) {
	dbOpt := new(DBOption)
	dbOpt.DBEngine = "mysql"
	seafileConfPath := filepath.Join(centralDir, "seafile.conf")
	opts := ini.LoadOptions{}
	opts.SpaceBeforeInlineComment = true
	config, err := ini.LoadSources(opts, seafileConfPath)
	if err != nil {
		return nil, fmt.Errorf("failed to load seafile.conf: %v", err)
	}
	section, err := config.GetSection("database")
	if err != nil {
		return dbOpt, nil
	}
	dbEngine := "mysql"
	key, err := section.GetKey("type")
	if err == nil {
		dbEngine = key.String()
	}
	if dbEngine != "mysql" {
		return nil, fmt.Errorf("unsupported database %s.", dbEngine)
	}
	dbOpt.DBEngine = dbEngine
	if key, err = section.GetKey("host"); err == nil {
		dbOpt.Host = key.String()
	}
	// user is required.
	if key, err = section.GetKey("user"); err == nil {
		dbOpt.User = key.String()
	}
	if key, err = section.GetKey("password"); err == nil {
		dbOpt.Password = key.String()
	}
	if key, err = section.GetKey("db_name"); err == nil {
		dbOpt.SeafileDbName = key.String()
	}
	port := 3306
	if key, err = section.GetKey("port"); err == nil {
		port, _ = key.Int()
	}
	dbOpt.Port = port
	useTLS := false
	if key, err = section.GetKey("use_ssl"); err == nil {
		useTLS, _ = key.Bool()
	}
	dbOpt.UseTLS = useTLS
	skipVerify := false
	if key, err = section.GetKey("skip_verify"); err == nil {
		skipVerify, _ = key.Bool()
	}
	dbOpt.SkipVerify = skipVerify
	if key, err = section.GetKey("ca_path"); err == nil {
		dbOpt.CaPath = key.String()
	}
	if key, err = section.GetKey("connection_charset"); err == nil {
		dbOpt.Charset = key.String()
	}
	return dbOpt, nil
}

// loadDBOptionFromEnv overrides dbOpt with SEAFILE_MYSQL_DB_* environment
// variables and fills in defaults for the port and database names.
func loadDBOptionFromEnv(dbOpt *DBOption) *DBOption {
	user := os.Getenv("SEAFILE_MYSQL_DB_USER")
	password := os.Getenv("SEAFILE_MYSQL_DB_PASSWORD")
	host := os.Getenv("SEAFILE_MYSQL_DB_HOST")
	portStr := os.Getenv("SEAFILE_MYSQL_DB_PORT")
	ccnetDbName := os.Getenv("SEAFILE_MYSQL_DB_CCNET_DB_NAME")
	seafileDbName := os.Getenv("SEAFILE_MYSQL_DB_SEAFILE_DB_NAME")
	if dbOpt == nil {
		dbOpt = new(DBOption)
	}
	if user != "" {
		dbOpt.User = user
	}
	if password != "" {
		dbOpt.Password = password
	}
	if host != "" {
		dbOpt.Host = host
	}
	if portStr != "" {
		port, _ := strconv.ParseUint(portStr, 10, 32)
		if port > 0 {
			dbOpt.Port = int(port)
		}
	}
	if dbOpt.Port == 0 {
		dbOpt.Port = 3306
	}
	if ccnetDbName != "" {
		dbOpt.CcnetDbName = ccnetDbName
	} else if dbOpt.CcnetDbName == "" {
		dbOpt.CcnetDbName = "ccnet_db"
		log.Infof("Failed to read SEAFILE_MYSQL_DB_CCNET_DB_NAME, use ccnet_db by default")
	}
	if seafileDbName != "" {
		dbOpt.SeafileDbName = seafileDbName
	} else if dbOpt.SeafileDbName == "" {
		dbOpt.SeafileDbName = "seafile_db"
		log.Infof("Failed to read SEAFILE_MYSQL_DB_SEAFILE_DB_NAME, use seafile_db by default")
	}
	return dbOpt
}
fileserver/quota.go ================================================ package main import ( "context" "database/sql" "fmt" "github.com/haiwen/seafile-server/fileserver/option" "github.com/haiwen/seafile-server/fileserver/repomgr" ) // InfiniteQuota indicates that the quota is unlimited. const ( InfiniteQuota = -2 ) func checkQuota(repoID string, delta int64) (int, error) { if repoID == "" { err := fmt.Errorf("bad argumets") return -1, err } vInfo, err := repomgr.GetVirtualRepoInfo(repoID) if err != nil { err := fmt.Errorf("failed to get virtual repo: %v", err) return -1, err } rRepoID := repoID if vInfo != nil { rRepoID = vInfo.OriginRepoID } user, err := repomgr.GetRepoOwner(rRepoID) if err != nil { err := fmt.Errorf("failed to get repo owner: %v", err) return -1, err } if user == "" { err := fmt.Errorf("repo %s has no owner", repoID) return -1, err } quota, err := getUserQuota(user) if err != nil { err := fmt.Errorf("failed to get user quota: %v", err) return -1, err } if quota == InfiniteQuota { return 0, nil } usage, err := getUserUsage(user) if err != nil || usage < 0 { err := fmt.Errorf("failed to get user usage: %v", err) return -1, err } usage += delta if usage >= quota { return 1, nil } return 0, nil } func getUserQuota(user string) (int64, error) { var quota int64 sqlStr := "SELECT quota FROM UserQuota WHERE user=?" ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() row := seafileDB.QueryRowContext(ctx, sqlStr, user) if err := row.Scan("a); err != nil { if err != sql.ErrNoRows { return -1, err } } if quota <= 0 { quota = option.DefaultQuota } return quota, nil } func getUserUsage(user string) (int64, error) { var usage sql.NullInt64 sqlStr := "SELECT SUM(size) FROM " + "RepoOwner o LEFT JOIN VirtualRepo v ON o.repo_id=v.repo_id, " + "RepoSize WHERE " + "owner_id=? 
AND o.repo_id=RepoSize.repo_id " + "AND v.repo_id IS NULL" ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() row := seafileDB.QueryRowContext(ctx, sqlStr, user) if err := row.Scan(&usage); err != nil { if err != sql.ErrNoRows { return -1, err } } if usage.Valid { return usage.Int64, nil } return 0, nil } ================================================ FILE: fileserver/repomgr/repomgr.go ================================================ // Package repomgr manages repo objects and file operations in repos. package repomgr import ( "context" "database/sql" "fmt" "time" // Change to non-blank imports when use _ "github.com/haiwen/seafile-server/fileserver/blockmgr" "github.com/haiwen/seafile-server/fileserver/commitmgr" "github.com/haiwen/seafile-server/fileserver/option" log "github.com/sirupsen/logrus" ) // Repo status const ( RepoStatusNormal = iota RepoStatusReadOnly NRepoStatus ) // Repo contains information about a repo. type Repo struct { ID string Name string Desc string LastModifier string LastModificationTime int64 HeadCommitID string RootID string IsCorrupted bool // Set when repo is virtual VirtualInfo *VRepoInfo // ID for fs and block store StoreID string // Encrypted repo info IsEncrypted bool EncVersion int Magic string RandomKey string Salt string PwdHash string PwdHashAlgo string PwdHashParams string Version int } // VRepoInfo contains virtual repo information. type VRepoInfo struct { RepoID string OriginRepoID string Path string BaseCommitID string } var seafileDB *sql.DB // Init initialize status of repomgr package func Init(seafDB *sql.DB) { seafileDB = seafDB } // Get returns Repo object by repo ID. func Get(id string) *Repo { query := `SELECT r.repo_id, b.commit_id, v.origin_repo, v.path, v.base_commit FROM ` + `Repo r LEFT JOIN Branch b ON r.repo_id = b.repo_id ` + `LEFT JOIN VirtualRepo v ON r.repo_id = v.repo_id ` + `WHERE r.repo_id = ? 
// Get returns Repo object by repo ID.
// It resolves the repo's master-branch head and any virtual-repo row in a
// single query, then fills the metadata fields from the head commit.
// Returns nil on any failure or when the repo does not exist.
func Get(id string) *Repo {
	query := `SELECT r.repo_id, b.commit_id, v.origin_repo, v.path, v.base_commit FROM ` +
		`Repo r LEFT JOIN Branch b ON r.repo_id = b.repo_id ` +
		`LEFT JOIN VirtualRepo v ON r.repo_id = v.repo_id ` +
		`WHERE r.repo_id = ? AND b.name = 'master'`
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	stmt, err := seafileDB.PrepareContext(ctx, query)
	if err != nil {
		log.Errorf("failed to prepare sql : %s :%v", query, err)
		return nil
	}
	defer stmt.Close()
	rows, err := stmt.QueryContext(ctx, id)
	if err != nil {
		log.Errorf("failed to query sql : %v", err)
		return nil
	}
	defer rows.Close()
	repo := new(Repo)
	// The VirtualRepo columns are NULL for a normal repo.
	var originRepoID sql.NullString
	var path sql.NullString
	var baseCommitID sql.NullString
	if rows.Next() {
		err := rows.Scan(&repo.ID, &repo.HeadCommitID, &originRepoID, &path, &baseCommitID)
		if err != nil {
			log.Errorf("failed to scan sql rows : %v", err)
			return nil
		}
	} else {
		return nil
	}
	if repo.HeadCommitID == "" {
		log.Errorf("repo %s is corrupted", id)
		return nil
	}
	if originRepoID.Valid {
		// Virtual repo: objects are stored under the origin repo's store.
		repo.VirtualInfo = new(VRepoInfo)
		repo.VirtualInfo.RepoID = id
		repo.VirtualInfo.OriginRepoID = originRepoID.String
		repo.StoreID = originRepoID.String
		if path.Valid {
			repo.VirtualInfo.Path = path.String
		}
		if baseCommitID.Valid {
			repo.VirtualInfo.BaseCommitID = baseCommitID.String
		}
	} else {
		repo.StoreID = repo.ID
	}
	commit, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
	if err != nil {
		log.Errorf("failed to load commit %s/%s : %v", repo.ID, repo.HeadCommitID, err)
		return nil
	}
	repo.Name = commit.RepoName
	repo.Desc = commit.RepoDesc
	repo.LastModifier = commit.CreatorName
	repo.LastModificationTime = commit.Ctime
	repo.RootID = commit.RootID
	repo.Version = commit.Version
	if commit.Encrypted == "true" {
		repo.IsEncrypted = true
		repo.EncVersion = commit.EncVersion
		// Per-version fields: v1 keeps only the magic (unless a password
		// hash is recorded); v2+ carry a random key; v3/v4 also a salt.
		if repo.EncVersion == 1 && commit.PwdHash == "" {
			repo.Magic = commit.Magic
		} else if repo.EncVersion == 2 {
			repo.RandomKey = commit.RandomKey
		} else if repo.EncVersion == 3 {
			repo.RandomKey = commit.RandomKey
			repo.Salt = commit.Salt
		} else if repo.EncVersion == 4 {
			repo.RandomKey = commit.RandomKey
			repo.Salt = commit.Salt
		}
		// For v2+ the magic is only kept when no password hash exists.
		if repo.EncVersion >= 2 && commit.PwdHash == "" {
			repo.Magic = commit.Magic
		}
		if commit.PwdHash != "" {
			repo.PwdHash = commit.PwdHash
			repo.PwdHashAlgo = commit.PwdHashAlgo
			repo.PwdHashParams = commit.PwdHashParams
		}
	}
	return repo
}

// RepoToCommit converts Repo to Commit.
// It copies repo identity and encryption metadata onto commit, mirroring
// the per-version field handling in Get.
func RepoToCommit(repo *Repo, commit *commitmgr.Commit) {
	commit.RepoID = repo.ID
	commit.RepoName = repo.Name
	if repo.IsEncrypted {
		commit.Encrypted = "true"
		commit.EncVersion = repo.EncVersion
		if repo.EncVersion == 1 && repo.PwdHash == "" {
			commit.Magic = repo.Magic
		} else if repo.EncVersion == 2 {
			commit.RandomKey = repo.RandomKey
		} else if repo.EncVersion == 3 {
			commit.RandomKey = repo.RandomKey
			commit.Salt = repo.Salt
		} else if repo.EncVersion == 4 {
			commit.RandomKey = repo.RandomKey
			commit.Salt = repo.Salt
		}
		if repo.EncVersion >= 2 && repo.PwdHash == "" {
			commit.Magic = repo.Magic
		}
		if repo.PwdHash != "" {
			commit.PwdHash = repo.PwdHash
			commit.PwdHashAlgo = repo.PwdHashAlgo
			commit.PwdHashParams = repo.PwdHashParams
		}
	} else {
		commit.Encrypted = "false"
	}
	commit.Version = repo.Version
}
// GetEx return repo object even if it's corrupted.
// Unlike Get, database or commit-load failures produce a Repo with
// IsCorrupted set rather than nil; nil is returned only when the repo
// row does not exist at all.
func GetEx(id string) *Repo {
	repo := new(Repo)
	query := `SELECT r.repo_id, b.commit_id, v.origin_repo, v.path, v.base_commit FROM ` +
		`Repo r LEFT JOIN Branch b ON r.repo_id = b.repo_id ` +
		`LEFT JOIN VirtualRepo v ON r.repo_id = v.repo_id ` +
		`WHERE r.repo_id = ? AND b.name = 'master'`
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	stmt, err := seafileDB.PrepareContext(ctx, query)
	if err != nil {
		repo.IsCorrupted = true
		return repo
	}
	defer stmt.Close()
	rows, err := stmt.QueryContext(ctx, id)
	if err != nil {
		repo.IsCorrupted = true
		return repo
	}
	defer rows.Close()
	// The VirtualRepo columns are NULL for a normal repo.
	var originRepoID sql.NullString
	var path sql.NullString
	var baseCommitID sql.NullString
	if rows.Next() {
		err := rows.Scan(&repo.ID, &repo.HeadCommitID, &originRepoID, &path, &baseCommitID)
		if err != nil {
			repo.IsCorrupted = true
			return repo
		}
	} else if rows.Err() != nil {
		repo.IsCorrupted = true
		return repo
	} else {
		// No row and no error: the repo simply doesn't exist.
		return nil
	}
	if originRepoID.Valid {
		// Virtual repo: objects live in the origin repo's store.
		repo.VirtualInfo = new(VRepoInfo)
		repo.VirtualInfo.RepoID = id
		repo.VirtualInfo.OriginRepoID = originRepoID.String
		repo.StoreID = originRepoID.String
		if path.Valid {
			repo.VirtualInfo.Path = path.String
		}
		if baseCommitID.Valid {
			repo.VirtualInfo.BaseCommitID = baseCommitID.String
		}
	} else {
		repo.StoreID = repo.ID
	}
	if repo.HeadCommitID == "" {
		repo.IsCorrupted = true
		return repo
	}
	commit, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
	if err != nil {
		log.Errorf("failed to load commit %s/%s : %v", repo.ID, repo.HeadCommitID, err)
		repo.IsCorrupted = true
		return repo
	}
	repo.Name = commit.RepoName
	repo.LastModifier = commit.CreatorName
	repo.LastModificationTime = commit.Ctime
	repo.RootID = commit.RootID
	repo.Version = commit.Version
	if commit.Encrypted == "true" {
		repo.IsEncrypted = true
		repo.EncVersion = commit.EncVersion
		// NOTE(review): unlike Get, this always copies the magic regardless
		// of PwdHash — confirm whether the difference is intentional.
		if repo.EncVersion == 1 {
			repo.Magic = commit.Magic
		} else if repo.EncVersion == 2 {
			repo.Magic = commit.Magic
			repo.RandomKey = commit.RandomKey
		} else if repo.EncVersion == 3 {
			repo.Magic = commit.Magic
			repo.RandomKey = commit.RandomKey
			repo.Salt = commit.Salt
		} else if repo.EncVersion == 4 {
			repo.Magic = commit.Magic
			repo.RandomKey = commit.RandomKey
			repo.Salt = commit.Salt
		}
		if commit.PwdHash != "" {
			repo.PwdHash = commit.PwdHash
			repo.PwdHashAlgo = commit.PwdHashAlgo
			repo.PwdHashParams = commit.PwdHashParams
		}
	}
	return repo
}

// GetVirtualRepoInfo return virtual repo info by repo id.
// Returns (nil, nil) when the repo is not a virtual repo.
func GetVirtualRepoInfo(repoID string) (*VRepoInfo, error) {
	sqlStr := "SELECT repo_id, origin_repo, path, base_commit FROM VirtualRepo WHERE repo_id = ?"
	vRepoInfo := new(VRepoInfo)
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	row := seafileDB.QueryRowContext(ctx, sqlStr, repoID)
	if err := row.Scan(&vRepoInfo.RepoID, &vRepoInfo.OriginRepoID, &vRepoInfo.Path, &vRepoInfo.BaseCommitID); err != nil {
		if err != sql.ErrNoRows {
			return nil, err
		}
		return nil, nil
	}
	return vRepoInfo, nil
}

// GetVirtualRepoInfoByOrigin return virtual repo info by origin repo id.
// The returned slice is nil when the origin repo has no virtual repos.
func GetVirtualRepoInfoByOrigin(originRepo string) ([]*VRepoInfo, error) {
	sqlStr := "SELECT repo_id, origin_repo, path, base_commit " +
		"FROM VirtualRepo WHERE origin_repo=?"
	var vRepos []*VRepoInfo
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	row, err := seafileDB.QueryContext(ctx, sqlStr, originRepo)
	if err != nil {
		return nil, err
	}
	defer row.Close()
	for row.Next() {
		vRepoInfo := new(VRepoInfo)
		if err := row.Scan(&vRepoInfo.RepoID, &vRepoInfo.OriginRepoID, &vRepoInfo.Path, &vRepoInfo.BaseCommitID); err != nil {
			if err != sql.ErrNoRows {
				return nil, err
			}
		}
		vRepos = append(vRepos, vRepoInfo)
	}
	return vRepos, nil
}

// GetEmailByToken return user's email by token.
// An unknown token yields ("", nil).
func GetEmailByToken(repoID string, token string) (string, error) {
	var email string
	sqlStr := "SELECT email FROM RepoUserToken WHERE repo_id = ? AND token = ?"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	row := seafileDB.QueryRowContext(ctx, sqlStr, repoID, token)
	if err := row.Scan(&email); err != nil {
		if err != sql.ErrNoRows {
			return email, err
		}
	}
	return email, nil
}
func GetRepoStatus(repoID string) (int, error) { var status int = -1 // First, check origin repo's status. sqlStr := "SELECT i.status FROM VirtualRepo v LEFT JOIN RepoInfo i " + "ON i.repo_id=v.origin_repo WHERE v.repo_id=? " + "AND i.repo_id IS NOT NULL" ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() row := seafileDB.QueryRowContext(ctx, sqlStr, repoID) if err := row.Scan(&status); err != nil { if err != sql.ErrNoRows { return status, err } else { status = -1 } } if status >= 0 { return status, nil } // Then, check repo's own status. sqlStr = "SELECT status FROM RepoInfo WHERE repo_id=?" row = seafileDB.QueryRowContext(ctx, sqlStr, repoID) if err := row.Scan(&status); err != nil { if err != sql.ErrNoRows { return status, err } } return status, nil } // TokenPeerInfoExists check if the token exists. func TokenPeerInfoExists(token string) (bool, error) { var exists string sqlStr := "SELECT token FROM RepoTokenPeerInfo WHERE token=?" ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() row := seafileDB.QueryRowContext(ctx, sqlStr, token) if err := row.Scan(&exists); err != nil { if err != sql.ErrNoRows { return false, err } return false, nil } return true, nil } // AddTokenPeerInfo add token peer info to RepoTokenPeerInfo table. func AddTokenPeerInfo(token, peerID, peerIP, peerName, clientVer string, syncTime int64) error { sqlStr := "INSERT INTO RepoTokenPeerInfo (token, peer_id, peer_ip, peer_name, sync_time, client_ver)" + "VALUES (?, ?, ?, ?, ?, ?)" ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() if _, err := seafileDB.ExecContext(ctx, sqlStr, token, peerID, peerIP, peerName, syncTime, clientVer); err != nil { return err } return nil } // UpdateTokenPeerInfo update token peer info to RepoTokenPeerInfo table. 
func UpdateTokenPeerInfo(token, peerID, clientVer string, syncTime int64) error { sqlStr := "UPDATE RepoTokenPeerInfo SET " + "peer_ip=?, sync_time=?, client_ver=? WHERE token=?" ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() if _, err := seafileDB.ExecContext(ctx, sqlStr, peerID, syncTime, clientVer, token); err != nil { return err } return nil } // GetUploadTmpFile gets the timp file path of upload file. func GetUploadTmpFile(repoID, filePath string) (string, error) { var filePathNoSlash string if filePath[0] == '/' { filePathNoSlash = filePath[1:] } else { filePathNoSlash = filePath filePath = "/" + filePath } var tmpFile string sqlStr := "SELECT tmp_file_path FROM WebUploadTempFiles WHERE repo_id = ? AND file_path = ?" ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() row := seafileDB.QueryRowContext(ctx, sqlStr, repoID, filePath) if err := row.Scan(&tmpFile); err != nil { if err != sql.ErrNoRows { return "", err } } if tmpFile == "" { row := seafileDB.QueryRowContext(ctx, sqlStr, repoID, filePathNoSlash) if err := row.Scan(&tmpFile); err != nil { if err != sql.ErrNoRows { return "", err } } } return tmpFile, nil } // AddUploadTmpFile adds the tmp file path of upload file. func AddUploadTmpFile(repoID, filePath, tmpFile string) error { if filePath[0] != '/' { filePath = "/" + filePath } sqlStr := "INSERT INTO WebUploadTempFiles (repo_id, file_path, tmp_file_path) VALUES (?, ?, ?)" ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() _, err := seafileDB.ExecContext(ctx, sqlStr, repoID, filePath, tmpFile) if err != nil { return err } return nil } // DelUploadTmpFile deletes the tmp file path of upload file. 
func DelUploadTmpFile(repoID, filePath string) error { var filePathNoSlash string if filePath[0] == '/' { filePathNoSlash = filePath[1:] } else { filePathNoSlash = filePath filePath = "/" + filePath } sqlStr := "DELETE FROM WebUploadTempFiles WHERE repo_id = ? AND file_path IN (?, ?)" ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() _, err := seafileDB.ExecContext(ctx, sqlStr, repoID, filePath, filePathNoSlash) if err != nil { return err } return nil } func setRepoCommitToDb(repoID, repoName string, updateTime int64, version int, isEncrypted string, lastModifier string) error { var exists int var encrypted int sqlStr := "SELECT 1 FROM RepoInfo WHERE repo_id=?" ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() row := seafileDB.QueryRowContext(ctx, sqlStr, repoID) if err := row.Scan(&exists); err != nil { if err != sql.ErrNoRows { return err } } if updateTime == 0 { updateTime = time.Now().Unix() } if isEncrypted == "true" { encrypted = 1 } if exists == 1 { sqlStr := "UPDATE RepoInfo SET name=?, update_time=?, version=?, is_encrypted=?, " + "last_modifier=? WHERE repo_id=?" if _, err := seafileDB.ExecContext(ctx, sqlStr, repoName, updateTime, version, encrypted, lastModifier, repoID); err != nil { return err } } else { sqlStr := "INSERT INTO RepoInfo (repo_id, name, update_time, version, is_encrypted, last_modifier) " + "VALUES (?, ?, ?, ?, ?, ?)" if _, err := seafileDB.ExecContext(ctx, sqlStr, repoID, repoName, updateTime, version, encrypted, lastModifier); err != nil { return err } } return nil } // SetVirtualRepoBaseCommitPath updates the table of VirtualRepo. func SetVirtualRepoBaseCommitPath(repoID, baseCommitID, newPath string) error { sqlStr := "UPDATE VirtualRepo SET base_commit=?, path=? WHERE repo_id=?" 
ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() if _, err := seafileDB.ExecContext(ctx, sqlStr, baseCommitID, newPath, repoID); err != nil { return err } return nil } // GetVirtualRepoIDsByOrigin return the virtual repo ids by origin repo id. func GetVirtualRepoIDsByOrigin(repoID string) ([]string, error) { sqlStr := "SELECT repo_id FROM VirtualRepo WHERE origin_repo=?" var id string var ids []string ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() row, err := seafileDB.QueryContext(ctx, sqlStr, repoID) if err != nil { return nil, err } defer row.Close() for row.Next() { if err := row.Scan(&id); err != nil { if err != sql.ErrNoRows { return nil, err } } ids = append(ids, id) } return ids, nil } // DelVirtualRepo deletes virtual repo from database. func DelVirtualRepo(repoID string, cloudMode bool) error { err := removeVirtualRepoOndisk(repoID, cloudMode) if err != nil { err := fmt.Errorf("failed to remove virtual repo on disk: %v", err) return err } sqlStr := "DELETE FROM VirtualRepo WHERE repo_id = ?" ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() _, err = seafileDB.ExecContext(ctx, sqlStr, repoID) if err != nil { return err } return nil } func removeVirtualRepoOndisk(repoID string, cloudMode bool) error { sqlStr := "DELETE FROM Repo WHERE repo_id = ?" ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() _, err := seafileDB.ExecContext(ctx, sqlStr, repoID) if err != nil { return err } sqlStr = "SELECT name, repo_id, commit_id FROM Branch WHERE repo_id=?" rows, err := seafileDB.QueryContext(ctx, sqlStr, repoID) if err != nil { return err } defer rows.Close() for rows.Next() { var name, id, commitID string if err := rows.Scan(&name, &id, &commitID); err != nil { if err != sql.ErrNoRows { return err } } sqlStr := "DELETE FROM RepoHead WHERE branch_name = ? AND repo_id = ?" 
_, err := seafileDB.ExecContext(ctx, sqlStr, name, id) if err != nil { return err } sqlStr = "DELETE FROM Branch WHERE name=? AND repo_id=?" _, err = seafileDB.ExecContext(ctx, sqlStr, name, id) if err != nil { return err } } sqlStr = "DELETE FROM RepoOwner WHERE repo_id = ?" _, err = seafileDB.ExecContext(ctx, sqlStr, repoID) if err != nil { return err } sqlStr = "DELETE FROM SharedRepo WHERE repo_id = ?" _, err = seafileDB.ExecContext(ctx, sqlStr, repoID) if err != nil { return err } sqlStr = "DELETE FROM RepoGroup WHERE repo_id = ?" _, err = seafileDB.ExecContext(ctx, sqlStr, repoID) if err != nil { return err } if !cloudMode { sqlStr = "DELETE FROM InnerPubRepo WHERE repo_id = ?" _, err := seafileDB.ExecContext(ctx, sqlStr, repoID) if err != nil { return err } } sqlStr = "DELETE FROM RepoUserToken WHERE repo_id = ?" _, err = seafileDB.ExecContext(ctx, sqlStr, repoID) if err != nil { return err } sqlStr = "DELETE FROM RepoValidSince WHERE repo_id = ?" _, err = seafileDB.ExecContext(ctx, sqlStr, repoID) if err != nil { return err } sqlStr = "DELETE FROM RepoSize WHERE repo_id = ?" _, err = seafileDB.ExecContext(ctx, sqlStr, repoID) if err != nil { return err } var exists int sqlStr = "SELECT 1 FROM GarbageRepos WHERE repo_id=?" row := seafileDB.QueryRowContext(ctx, sqlStr, repoID) if err := row.Scan(&exists); err != nil { if err != sql.ErrNoRows { return err } } if exists == 0 { sqlStr = "INSERT INTO GarbageRepos (repo_id) VALUES (?)" _, err := seafileDB.ExecContext(ctx, sqlStr, repoID) if err != nil { return err } } else { sqlStr = "REPLACE INTO GarbageRepos (repo_id) VALUES (?)" _, err := seafileDB.ExecContext(ctx, sqlStr, repoID) if err != nil { return err } } return nil } // IsVirtualRepo check if the repo is a virtual reop. func IsVirtualRepo(repoID string) (bool, error) { var exists int sqlStr := "SELECT 1 FROM VirtualRepo WHERE repo_id = ?" 
ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() row := seafileDB.QueryRowContext(ctx, sqlStr, repoID) if err := row.Scan(&exists); err != nil { if err != sql.ErrNoRows { return false, err } return false, nil } return true, nil } // GetRepoOwner get the owner of repo. func GetRepoOwner(repoID string) (string, error) { var owner string sqlStr := "SELECT owner_id FROM RepoOwner WHERE repo_id=?" ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() row := seafileDB.QueryRowContext(ctx, sqlStr, repoID) if err := row.Scan(&owner); err != nil { if err != sql.ErrNoRows { return "", err } } return owner, nil } func UpdateRepoInfo(repoID, commitID string) error { head, err := commitmgr.Load(repoID, commitID) if err != nil { err := fmt.Errorf("failed to get commit %s:%s", repoID, commitID) return err } setRepoCommitToDb(repoID, head.RepoName, head.Ctime, head.Version, head.Encrypted, head.CreatorName) return nil } func HasLastGCID(repoID, clientID string) (bool, error) { sqlStr := "SELECT 1 FROM LastGCID WHERE repo_id = ? AND client_id = ?" var exist int ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() row := seafileDB.QueryRowContext(ctx, sqlStr, repoID, clientID) if err := row.Scan(&exist); err != nil { if err != sql.ErrNoRows { return false, err } } if exist == 0 { return false, nil } return true, nil } func GetLastGCID(repoID, clientID string) (string, error) { sqlStr := "SELECT gc_id FROM LastGCID WHERE repo_id = ? AND client_id = ?" var gcID sql.NullString ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() row := seafileDB.QueryRowContext(ctx, sqlStr, repoID, clientID) if err := row.Scan(&gcID); err != nil { if err != sql.ErrNoRows { return "", err } } return gcID.String, nil } func GetCurrentGCID(repoID string) (string, error) { sqlStr := "SELECT gc_id FROM GCID WHERE repo_id = ?" 
var gcID sql.NullString ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() row := seafileDB.QueryRowContext(ctx, sqlStr, repoID) if err := row.Scan(&gcID); err != nil { if err != sql.ErrNoRows { return "", err } } return gcID.String, nil } func RemoveLastGCID(repoID, clientID string) error { sqlStr := "DELETE FROM LastGCID WHERE repo_id = ? AND client_id = ?" ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() if _, err := seafileDB.ExecContext(ctx, sqlStr, repoID, clientID); err != nil { return err } return nil } func SetLastGCID(repoID, clientID, gcID string) error { exist, err := HasLastGCID(repoID, clientID) if err != nil { return err } ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() if exist { sqlStr := "UPDATE LastGCID SET gc_id = ? WHERE repo_id = ? AND client_id = ?" if _, err = seafileDB.ExecContext(ctx, sqlStr, gcID, repoID, clientID); err != nil { return err } } else { sqlStr := "INSERT INTO LastGCID (repo_id, client_id, gc_id) VALUES (?, ?, ?)" if _, err = seafileDB.ExecContext(ctx, sqlStr, repoID, clientID, gcID); err != nil { return err } } return nil } ================================================ FILE: fileserver/repomgr/repomgr_test.go ================================================ package repomgr import ( "database/sql" "fmt" "os" "testing" _ "github.com/go-sql-driver/mysql" "github.com/haiwen/seafile-server/fileserver/commitmgr" "github.com/haiwen/seafile-server/fileserver/searpc" ) const ( // repoID = "9646f13e-bbab-4eaf-9a84-fb6e1cd776b3" user = "seafile" password = "seafile" host = "127.0.0.1" port = 3306 dbName = "seafile-db" useTLS = false seafileConfPath = "/root/conf" seafileDataDir = "/root/conf/seafile-data" repoName = "repo" userName = "seafile@seafile.com" encVersion = 2 pipePath = "/root/runtime/seafile.sock" service = "seafserv-threaded-rpcserver" ) var repoID string var client *searpc.Client func 
createRepo() string { id, err := client.Call("seafile_create_repo", repoName, "", userName, nil, encVersion) if err != nil { fmt.Printf("failed to create repo.\n") } if id == nil { fmt.Printf("repo id is nil.\n") os.Exit(1) } repoid, ok := id.(string) if !ok { fmt.Printf("returned value isn't repo id.\n") } return repoid } func delRepo() { _, err := client.Call("seafile_destroy_repo", repoID) if err != nil { fmt.Printf("failed to del repo.\n") os.Exit(1) } } func TestMain(m *testing.M) { client = searpc.Init(pipePath, service, 10) repoID = createRepo() dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=%t", user, password, host, port, dbName, useTLS) seafDB, err := sql.Open("mysql", dsn) if err != nil { fmt.Printf("Failed to open database: %v", err) } Init(seafDB) commitmgr.Init(seafileConfPath, seafileDataDir) code := m.Run() delRepo() os.Exit(code) } func TestGet(t *testing.T) { repo := Get(repoID) if repo == nil { t.Errorf("failed to get repo : %s.\n", repoID) t.FailNow() } if repo.ID != repoID { t.Errorf("failed to get repo : %s.\n", repoID) } } ================================================ FILE: fileserver/searpc/searpc.go ================================================ // Package searpc implements searpc client protocol with unix pipe transport. package searpc import ( "bufio" "encoding/binary" "encoding/json" "fmt" "io" "net" ) // Client represents a connections to the RPC server. type Client struct { // path of the named pipe pipePath string // RPC service name Service string pool chan *net.UnixConn maxConn int } type request struct { Service string `json:"service"` Request string `json:"request"` } // Init initializes rpc client. func Init(pipePath string, service string, maxConn int) *Client { client := new(Client) client.pipePath = pipePath client.Service = service client.maxConn = maxConn client.pool = make(chan *net.UnixConn, maxConn) return client } // Call calls the RPC function funcname with variadic parameters. 
// The return value of the RPC function is return as interface{} type // The true returned type can be int32, int64, string, struct (object), list of struct (objects) or JSON func (c *Client) Call(funcname string, params ...interface{}) (interface{}, error) { // TODO: use reflection to compose requests and parse results. conn, err := c.getConn() if err != nil { return nil, err } hasErr := false defer func() { if hasErr { conn.Close() } else { c.returnConn(conn) } }() var req []interface{} req = append(req, funcname) req = append(req, params...) jsonstr, err := json.Marshal(req) if err != nil { hasErr = true err := fmt.Errorf("failed to encode rpc call to json : %v", err) return nil, err } reqHeader := new(request) reqHeader.Service = c.Service reqHeader.Request = string(jsonstr) jsonstr, err = json.Marshal(reqHeader) if err != nil { hasErr = true err := fmt.Errorf("failed to convert object to json : %v", err) return nil, err } header := make([]byte, 4) binary.LittleEndian.PutUint32(header, uint32(len(jsonstr))) _, err = conn.Write([]byte(header)) if err != nil { hasErr = true err := fmt.Errorf("Failed to write rpc request header : %v", err) return nil, err } _, err = conn.Write([]byte(jsonstr)) if err != nil { hasErr = true err := fmt.Errorf("Failed to write rpc request body : %v", err) return nil, err } reader := bufio.NewReader(conn) buflen := make([]byte, 4) _, err = io.ReadFull(reader, buflen) if err != nil { hasErr = true err := fmt.Errorf("failed to read response header from rpc server : %v", err) return nil, err } retlen := binary.LittleEndian.Uint32(buflen) msg := make([]byte, retlen) _, err = io.ReadFull(reader, msg) if err != nil { hasErr = true err := fmt.Errorf("failed to read response body from rpc server : %v", err) return nil, err } retlist := make(map[string]interface{}) err = json.Unmarshal(msg, &retlist) if err != nil { hasErr = true err := fmt.Errorf("failed to decode rpc response : %v", err) return nil, err } if _, ok := retlist["err_code"]; ok 
{ hasErr = true err := fmt.Errorf("searpc server returned error : %v", retlist["err_msg"]) return nil, err } if _, ok := retlist["ret"]; ok { ret := retlist["ret"] return ret, nil } hasErr = true err = fmt.Errorf("No value returned") return nil, err } func (c *Client) getConn() (*net.UnixConn, error) { select { case conn := <-c.pool: return conn, nil default: unixAddr, err := net.ResolveUnixAddr("unix", c.pipePath) if err != nil { err := fmt.Errorf("failed to resolve unix addr when calling rpc : %w", err) return nil, err } conn, err := net.DialUnix("unix", nil, unixAddr) if err != nil { err := fmt.Errorf("failed to dial unix when calling rpc : %v", err) return nil, err } return conn, nil } } func (c *Client) returnConn(conn *net.UnixConn) { select { case c.pool <- conn: default: conn.Close() } } ================================================ FILE: fileserver/searpc/searpc_test.go ================================================ package searpc import ( "os" "testing" ) const ( repoName = "repo" userName = "seafile@seafile.com" encVersion = 2 pipePath = "/root/runtime/seafile.sock" service = "seafserv-threaded-rpcserver" ) var client *Client func TestMain(m *testing.M) { client = Init(pipePath, service, 10) code := m.Run() os.Exit(code) } func TestCallRpc(t *testing.T) { repoID, err := client.Call("seafile_create_repo", repoName, "", userName, nil, encVersion) if err != nil { t.Errorf("failed to create repo.\n") } if repoID == nil { t.Errorf("repo id is nil.\n") t.FailNow() } repo, err := client.Call("seafile_get_repo", repoID) if err != nil { t.Errorf("failed to get repo.\n") } if repo == nil { t.Errorf("repo is nil.\n") t.FailNow() } repoMap, ok := repo.(map[string]interface{}) if !ok { t.Errorf("failed to assert the type.\n") t.FailNow() } if repoMap["id"] != repoID { t.Errorf("wrong repo id.\n") } repoList, err := client.Call("seafile_get_repo_list", -1, -1, "") if err != nil { t.Errorf("failed to get repo list.\n") } if repoList == nil { t.Errorf("repo list is 
nil.\n") t.FailNow() } var exists bool repos, ok := repoList.([]interface{}) if !ok { t.Errorf("failed to assert the type.\n") t.FailNow() } for _, v := range repos { repo, ok := v.(map[string]interface{}) if !ok { t.Errorf("failed to assert the type.\n") t.FailNow() } if repo["id"] == repoID { exists = true break } } if exists != true { t.Errorf("can't find repo %s in repo list.\n", repoID) } client.Call("seafile_destroy_repo", repoID) } ================================================ FILE: fileserver/share/group/group.go ================================================ // Package group manages group membership and group shares. package group ================================================ FILE: fileserver/share/public/public.go ================================================ // Package public manager inner public shares. package public ================================================ FILE: fileserver/share/share.go ================================================ // Package share manages share relations. // share: manages personal shares and provide high level permission check functions. 
package share import ( "context" "database/sql" "fmt" "path/filepath" "strconv" "strings" "github.com/haiwen/seafile-server/fileserver/option" "github.com/haiwen/seafile-server/fileserver/repomgr" log "github.com/sirupsen/logrus" ) type group struct { id int groupName string creatorName string timestamp int64 parentGroupID int } var ccnetDB *sql.DB var seafileDB *sql.DB var groupTableName string var cloudMode bool // Init ccnetDB, seafileDB, groupTableName, cloudMode func Init(cnDB *sql.DB, seafDB *sql.DB, grpTableName string, clMode bool) { ccnetDB = cnDB seafileDB = seafDB groupTableName = grpTableName cloudMode = clMode } // CheckPerm get user's repo permission func CheckPerm(repoID string, user string) string { var perm string vInfo, err := repomgr.GetVirtualRepoInfo(repoID) if err != nil { log.Errorf("Failed to get virtual repo info by repo id %s: %v", repoID, err) } if vInfo != nil { perm = checkVirtualRepoPerm(repoID, vInfo.OriginRepoID, user, vInfo.Path) return perm } perm = checkRepoSharePerm(repoID, user) return perm } func checkVirtualRepoPerm(repoID, originRepoID, user, vPath string) string { owner, err := repomgr.GetRepoOwner(originRepoID) if err != nil { log.Errorf("Failed to get repo owner: %v", err) } var perm string if owner != "" && owner == user { perm = "rw" return perm } perm = checkPermOnParentRepo(originRepoID, user, vPath) if perm != "" { return perm } perm = checkRepoSharePerm(originRepoID, user) return perm } func getUserGroups(sqlStr string, args ...interface{}) ([]group, error) { ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() rows, err := ccnetDB.QueryContext(ctx, sqlStr, args...) 
if err != nil { return nil, err } defer rows.Close() var groups []group var g group for rows.Next() { if err := rows.Scan(&g.id, &g.groupName, &g.creatorName, &g.timestamp, &g.parentGroupID); err == nil { groups = append(groups, g) } } if err := rows.Err(); err != nil { return nil, err } return groups, nil } func getGroupsByUser(userName string, returnAncestors bool) ([]group, error) { sqlStr := fmt.Sprintf("SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM "+ "`%s` g, GroupUser u WHERE g.group_id = u.group_id AND user_name=? ORDER BY g.group_id DESC", groupTableName) groups, err := getUserGroups(sqlStr, userName) if err != nil { err := fmt.Errorf("Failed to get groups by user %s: %v", userName, err) return nil, err } if !returnAncestors { return groups, nil } sqlStr = "" var ret []group for _, group := range groups { parentGroupID := group.parentGroupID groupID := group.id if parentGroupID != 0 { if sqlStr == "" { sqlStr = fmt.Sprintf("SELECT path FROM GroupStructure WHERE group_id IN (%d", groupID) } else { sqlStr += fmt.Sprintf(", %d", groupID) } } else { ret = append(ret, group) } } if sqlStr != "" { sqlStr += ")" paths, err := getGroupPaths(sqlStr) if err != nil { log.Errorf("Failed to get group paths: %v", err) } if paths == "" { err := fmt.Errorf("Failed to get groups path for user %s", userName) return nil, err } sqlStr = fmt.Sprintf("SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM "+ "`%s` g WHERE g.group_id IN (%s) ORDER BY g.group_id DESC", groupTableName, paths) groups, err := getUserGroups(sqlStr) if err != nil { return nil, err } ret = append(ret, groups...) 
} return ret, nil } func getGroupPaths(sqlStr string) (string, error) { var paths string ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() rows, err := ccnetDB.QueryContext(ctx, sqlStr) if err != nil { return paths, err } defer rows.Close() var path string for rows.Next() { rows.Scan(&path) if paths == "" { paths = path } else { paths += fmt.Sprintf(", %s", path) } } if err := rows.Err(); err != nil { return "", err } return paths, nil } func checkGroupPermByUser(repoID string, userName string) (string, error) { groups, err := getGroupsByUser(userName, false) if err != nil { return "", err } if len(groups) == 0 { return "", nil } var sqlBuilder strings.Builder sqlBuilder.WriteString("SELECT permission FROM RepoGroup WHERE repo_id = ? AND group_id IN (") for i := 0; i < len(groups); i++ { sqlBuilder.WriteString(strconv.Itoa(groups[i].id)) if i+1 < len(groups) { sqlBuilder.WriteString(",") } } sqlBuilder.WriteString(")") ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() rows, err := seafileDB.QueryContext(ctx, sqlBuilder.String(), repoID) if err != nil { err := fmt.Errorf("Failed to get group permission by user %s: %v", userName, err) return "", err } defer rows.Close() var perm string var origPerm string for rows.Next() { if err := rows.Scan(&perm); err == nil { if perm == "rw" { origPerm = perm } else if perm == "r" && origPerm == "" { origPerm = perm } } } if err := rows.Err(); err != nil { err := fmt.Errorf("Failed to get group permission for user %s: %v", userName, err) return "", err } return origPerm, nil } func checkSharedRepoPerm(repoID string, email string) (string, error) { sqlStr := "SELECT permission FROM SharedRepo WHERE repo_id=? AND to_email=?" 
ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() row := seafileDB.QueryRowContext(ctx, sqlStr, repoID, email) var perm string if err := row.Scan(&perm); err != nil { if err != sql.ErrNoRows { err := fmt.Errorf("Failed to check shared repo permission: %v", err) return "", err } } return perm, nil } func checkInnerPubRepoPerm(repoID string) (string, error) { sqlStr := "SELECT permission FROM InnerPubRepo WHERE repo_id=?" ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() row := seafileDB.QueryRowContext(ctx, sqlStr, repoID) var perm string if err := row.Scan(&perm); err != nil { if err != sql.ErrNoRows { err := fmt.Errorf("Failed to check inner public repo permission: %v", err) return "", err } } return perm, nil } func checkRepoSharePerm(repoID string, userName string) string { owner, err := repomgr.GetRepoOwner(repoID) if err != nil { log.Errorf("Failed to get repo owner: %v", err) } if owner != "" && owner == userName { perm := "rw" return perm } perm, err := checkSharedRepoPerm(repoID, userName) if err != nil { log.Errorf("Failed to get shared repo permission: %v", err) } if perm != "" { return perm } perm, err = checkGroupPermByUser(repoID, userName) if err != nil { log.Errorf("Failed to get group permission by user %s: %v", userName, err) } if perm != "" { return perm } if !cloudMode { perm, err = checkInnerPubRepoPerm(repoID) if err != nil { log.Errorf("Failed to get inner pulic repo permission by repo id %s: %v", repoID, err) return "" } return perm } return "" } func getSharedDirsToUser(originRepoID string, toEmail string) (map[string]string, error) { dirs := make(map[string]string) sqlStr := "SELECT v.path, s.permission FROM SharedRepo s, VirtualRepo v WHERE " + "s.repo_id = v.repo_id AND s.to_email = ? AND v.origin_repo = ?" 
ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() rows, err := seafileDB.QueryContext(ctx, sqlStr, toEmail, originRepoID) if err != nil { err := fmt.Errorf("Failed to get shared directories by user %s: %v", toEmail, err) return nil, err } defer rows.Close() var path string var perm string for rows.Next() { if err := rows.Scan(&path, &perm); err == nil { dirs[path] = perm } } if err := rows.Err(); err != nil { err := fmt.Errorf("Failed to get shared directories by user %s: %v", toEmail, err) return nil, err } return dirs, nil } func getDirPerm(perms map[string]string, path string) string { tmp := path var perm string // If the path is empty, filepath.Dir returns ".". If the path consists entirely of separators, // filepath.Dir returns a single separator. for tmp != "/" && tmp != "." && tmp != "" { if perm, exists := perms[tmp]; exists { return perm } tmp = filepath.Dir(tmp) } return perm } func convertGroupListToStr(groups []group) string { var groupIDs strings.Builder for i, group := range groups { groupIDs.WriteString(strconv.Itoa(group.id)) if i+1 < len(groups) { groupIDs.WriteString(",") } } return groupIDs.String() } func getSharedDirsToGroup(originRepoID string, groups []group) (map[string]string, error) { dirs := make(map[string]string) groupIDs := convertGroupListToStr(groups) sqlStr := fmt.Sprintf("SELECT v.path, s.permission "+ "FROM RepoGroup s, VirtualRepo v WHERE "+ "s.repo_id = v.repo_id AND v.origin_repo = ? 
"+ "AND s.group_id in (%s)", groupIDs) ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() rows, err := seafileDB.QueryContext(ctx, sqlStr, originRepoID) if err != nil { err := fmt.Errorf("Failed to get shared directories: %v", err) return nil, err } defer rows.Close() var path string var perm string for rows.Next() { if err := rows.Scan(&path, &perm); err == nil { dirs[path] = perm } } if err := rows.Err(); err != nil { err := fmt.Errorf("Failed to get shared directories: %v", err) return nil, err } return dirs, nil } func checkPermOnParentRepo(originRepoID, user, vPath string) string { var perm string userPerms, err := getSharedDirsToUser(originRepoID, user) if err != nil { log.Errorf("Failed to get all shared folder perms in parent repo %.8s for user %s", originRepoID, user) return "" } if len(userPerms) > 0 { perm = getDirPerm(userPerms, vPath) if perm != "" { return perm } } groups, err := getGroupsByUser(user, false) if err != nil { log.Errorf("Failed to get groups by user %s: %v", user, err) } if len(groups) == 0 { return perm } groupPerms, err := getSharedDirsToGroup(originRepoID, groups) if err != nil { log.Errorf("Failed to get all shared folder perm from parent repo %.8s to all user groups", originRepoID) return "" } if len(groupPerms) == 0 { return "" } perm = getDirPerm(groupPerms, vPath) return perm } // SharedRepo is a shared repo object type SharedRepo struct { Version int `json:"version"` ID string `json:"id"` HeadCommitID string `json:"head_commit_id"` Name string `json:"name"` MTime int64 `json:"mtime"` Permission string `json:"permission"` Type string `json:"type"` Owner string `json:"owner"` RepoType string `json:"-"` } // GetReposByOwner get repos by owner func GetReposByOwner(email string) ([]*SharedRepo, error) { var repos []*SharedRepo query := "SELECT o.repo_id, b.commit_id, i.name, " + "i.version, i.update_time, i.last_modifier, i.type FROM " + "RepoOwner o LEFT JOIN Branch b ON o.repo_id = 
b.repo_id " + "LEFT JOIN RepoInfo i ON o.repo_id = i.repo_id " + "LEFT JOIN VirtualRepo v ON o.repo_id = v.repo_id " + "WHERE owner_id=? AND " + "v.repo_id IS NULL " + "ORDER BY i.update_time DESC, o.repo_id" ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() stmt, err := seafileDB.PrepareContext(ctx, query) if err != nil { return nil, err } defer stmt.Close() rows, err := stmt.QueryContext(ctx, email) if err != nil { return nil, err } defer rows.Close() for rows.Next() { repo := new(SharedRepo) var repoName, lastModifier, repoType sql.NullString if err := rows.Scan(&repo.ID, &repo.HeadCommitID, &repoName, &repo.Version, &repo.MTime, &lastModifier, &repoType); err == nil { if repo.HeadCommitID == "" { continue } if !repoName.Valid || !lastModifier.Valid { continue } if repoName.String == "" || lastModifier.String == "" { continue } repo.Name = repoName.String if repoType.Valid { repo.RepoType = repoType.String } repos = append(repos, repo) } } if err := rows.Err(); err != nil { return nil, err } return repos, nil } // ListInnerPubRepos get inner public repos func ListInnerPubRepos() ([]*SharedRepo, error) { query := "SELECT InnerPubRepo.repo_id, " + "owner_id, permission, commit_id, i.name, " + "i.update_time, i.version, i.type " + "FROM InnerPubRepo " + "LEFT JOIN RepoInfo i ON InnerPubRepo.repo_id = i.repo_id, RepoOwner, Branch " + "WHERE InnerPubRepo.repo_id=RepoOwner.repo_id AND " + "InnerPubRepo.repo_id = Branch.repo_id AND Branch.name = 'master'" ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() stmt, err := seafileDB.PrepareContext(ctx, query) if err != nil { return nil, err } defer stmt.Close() rows, err := stmt.QueryContext(ctx) if err != nil { return nil, err } defer rows.Close() var repos []*SharedRepo for rows.Next() { repo := new(SharedRepo) var repoName, repoType sql.NullString if err := rows.Scan(&repo.ID, &repo.Owner, &repo.Permission, &repo.HeadCommitID, &repoName, 
&repo.MTime, &repo.Version, &repoType); err == nil { if !repoName.Valid { continue } if repoName.String == "" { continue } repo.Name = repoName.String if repoType.Valid { repo.RepoType = repoType.String } repos = append(repos, repo) } } if err := rows.Err(); err != nil { return nil, err } return repos, nil } // ListShareRepos list share repos by email func ListShareRepos(email, columnType string) ([]*SharedRepo, error) { var repos []*SharedRepo var query string if columnType == "from_email" { query = "SELECT sh.repo_id, to_email, " + "permission, commit_id, " + "i.name, i.update_time, i.version, i.type FROM " + "SharedRepo sh LEFT JOIN RepoInfo i ON sh.repo_id = i.repo_id, Branch b " + "WHERE from_email=? AND " + "sh.repo_id = b.repo_id AND " + "b.name = 'master' " + "ORDER BY i.update_time DESC, sh.repo_id" } else if columnType == "to_email" { query = "SELECT sh.repo_id, from_email, " + "permission, commit_id, " + "i.name, i.update_time, i.version, i.type FROM " + "SharedRepo sh LEFT JOIN RepoInfo i ON sh.repo_id = i.repo_id, Branch b " + "WHERE to_email=? 
AND " + "sh.repo_id = b.repo_id AND " + "b.name = 'master' " + "ORDER BY i.update_time DESC, sh.repo_id" } else { err := fmt.Errorf("Wrong column type: %s", columnType) return nil, err } ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() stmt, err := seafileDB.PrepareContext(ctx, query) if err != nil { return nil, err } defer stmt.Close() rows, err := stmt.QueryContext(ctx, email) if err != nil { return nil, err } defer rows.Close() for rows.Next() { repo := new(SharedRepo) var repoName, repoType sql.NullString if err := rows.Scan(&repo.ID, &repo.Owner, &repo.Permission, &repo.HeadCommitID, &repoName, &repo.MTime, &repo.Version, &repoType); err == nil { if !repoName.Valid { continue } if repoName.String == "" { continue } repo.Name = repoName.String if repoType.Valid { repo.RepoType = repoType.String } repos = append(repos, repo) } } if err := rows.Err(); err != nil { return nil, err } return repos, nil } // GetGroupReposByUser get group repos by user func GetGroupReposByUser(user string, orgID int) ([]*SharedRepo, error) { groups, err := getGroupsByUser(user, true) if err != nil { return nil, err } if len(groups) == 0 { return nil, nil } var sqlBuilder strings.Builder if orgID < 0 { sqlBuilder.WriteString("SELECT g.repo_id, " + "user_name, permission, commit_id, " + "i.name, i.update_time, i.version, i.type " + "FROM RepoGroup g " + "LEFT JOIN RepoInfo i ON g.repo_id = i.repo_id, " + "Branch b WHERE g.repo_id = b.repo_id AND " + "b.name = 'master' AND group_id IN (") } else { sqlBuilder.WriteString("SELECT g.repo_id, " + "owner, permission, commit_id, " + "i.name, i.update_time, i.version, i.type " + "FROM OrgGroupRepo g " + "LEFT JOIN RepoInfo i ON g.repo_id = i.repo_id, " + "Branch b WHERE g.repo_id = b.repo_id AND " + "b.name = 'master' AND group_id IN (") } for i := 0; i < len(groups); i++ { sqlBuilder.WriteString(strconv.Itoa(groups[i].id)) if i+1 < len(groups) { sqlBuilder.WriteString(",") } } sqlBuilder.WriteString(" 
) ORDER BY group_id") ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() rows, err := seafileDB.QueryContext(ctx, sqlBuilder.String()) if err != nil { return nil, err } defer rows.Close() var repos []*SharedRepo for rows.Next() { gRepo := new(SharedRepo) var repoType sql.NullString if err := rows.Scan(&gRepo.ID, &gRepo.Owner, &gRepo.Permission, &gRepo.HeadCommitID, &gRepo.Name, &gRepo.MTime, &gRepo.Version, &repoType); err == nil { if repoType.Valid { gRepo.RepoType = repoType.String } repos = append(repos, gRepo) } } if err := rows.Err(); err != nil { return nil, err } return repos, nil } ================================================ FILE: fileserver/size_sched.go ================================================ package main import ( "context" "encoding/json" "fmt" "path/filepath" "time" "gopkg.in/ini.v1" "database/sql" "github.com/go-redis/redis/v8" "github.com/haiwen/seafile-server/fileserver/commitmgr" "github.com/haiwen/seafile-server/fileserver/diff" "github.com/haiwen/seafile-server/fileserver/fsmgr" "github.com/haiwen/seafile-server/fileserver/option" "github.com/haiwen/seafile-server/fileserver/repomgr" "github.com/haiwen/seafile-server/fileserver/workerpool" log "github.com/sirupsen/logrus" ) const ( RepoSizeList = "repo_size_task" ) var updateSizePool *workerpool.WorkPool var redisClient *redis.Client func sizeSchedulerInit() { var n int = 1 var seafileConfPath string if centralDir != "" { seafileConfPath = filepath.Join(centralDir, "seafile.conf") } else { seafileConfPath = filepath.Join(absDataDir, "seafile.conf") } config, err := ini.Load(seafileConfPath) if err != nil { log.Fatalf("Failed to load seafile.conf: %v", err) } if section, err := config.GetSection("scheduler"); err == nil { if key, err := section.GetKey("size_sched_thread_num"); err == nil { num, err := key.Int() if err == nil { n = num } } } updateSizePool = workerpool.CreateWorkerPool(computeRepoSize, n) server := fmt.Sprintf("%s:%d", 
option.RedisHost, option.RedisPort) opt := &redis.Options{ Addr: server, Password: option.RedisPasswd, } opt.PoolSize = n redisClient = redis.NewClient(opt) } func computeRepoSize(args ...interface{}) error { if len(args) < 1 { return nil } repoID := args[0].(string) var size int64 var fileCount int64 repo := repomgr.Get(repoID) if repo == nil { err := fmt.Errorf("failed to get repo %s", repoID) return err } info, err := getOldRepoInfo(repoID) if err != nil { err := fmt.Errorf("failed to get old repo info: %v", err) return err } if info != nil && info.HeadID == repo.HeadCommitID { return nil } head, err := commitmgr.Load(repo.ID, repo.HeadCommitID) if err != nil { err := fmt.Errorf("failed to get head commit %s", repo.HeadCommitID) return err } var oldHead *commitmgr.Commit if info != nil { commit, _ := commitmgr.Load(repo.ID, info.HeadID) oldHead = commit } if info != nil && oldHead != nil { var results []*diff.DiffEntry var changeSize int64 var changeFileCount int64 err := diff.DiffCommits(oldHead, head, &results, false) if err != nil { err := fmt.Errorf("failed to do diff commits: %v", err) return err } for _, de := range results { if de.Status == diff.DiffStatusDeleted { changeSize -= de.Size changeFileCount-- } else if de.Status == diff.DiffStatusAdded { changeSize += de.Size changeFileCount++ } else if de.Status == diff.DiffStatusModified { changeSize = changeSize + de.Size - de.OriginSize } } size = info.Size + changeSize fileCount = info.FileCount + changeFileCount } else { info, err := fsmgr.GetFileCountInfoByPath(repo.StoreID, repo.RootID, "/") if err != nil { err := fmt.Errorf("failed to get file count") return err } fileCount = info.FileCount size = info.Size } err = setRepoSizeAndFileCount(repoID, repo.HeadCommitID, size, fileCount) if err != nil { err := fmt.Errorf("failed to set repo size and file count %s: %v", repoID, err) return err } err = notifyRepoSizeChange(repo.StoreID) if err != nil { log.Warnf("Failed to notify repo size change for repo %s: 
%v", repoID, err) } return nil } func setRepoSizeAndFileCount(repoID, newHeadID string, size, fileCount int64) error { ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() trans, err := seafileDB.BeginTx(ctx, nil) if err != nil { err := fmt.Errorf("failed to start transaction: %v", err) return err } var headID string sqlStr := "SELECT head_id FROM RepoSize WHERE repo_id=?" row := trans.QueryRowContext(ctx, sqlStr, repoID) if err := row.Scan(&headID); err != nil { if err != sql.ErrNoRows { trans.Rollback() return err } } if headID == "" { sqlStr := "INSERT INTO RepoSize (repo_id, size, head_id) VALUES (?, ?, ?)" _, err = trans.ExecContext(ctx, sqlStr, repoID, size, newHeadID) if err != nil { trans.Rollback() return err } } else { sqlStr = "UPDATE RepoSize SET size = ?, head_id = ? WHERE repo_id = ?" _, err = trans.ExecContext(ctx, sqlStr, size, newHeadID, repoID) if err != nil { trans.Rollback() return err } } var exist int sqlStr = "SELECT 1 FROM RepoFileCount WHERE repo_id=?" row = trans.QueryRowContext(ctx, sqlStr, repoID) if err := row.Scan(&exist); err != nil { if err != sql.ErrNoRows { trans.Rollback() return err } } if exist != 0 { sqlStr := "UPDATE RepoFileCount SET file_count=? WHERE repo_id=?" 
_, err = trans.ExecContext(ctx, sqlStr, fileCount, repoID) if err != nil { trans.Rollback() return err } } else { sqlStr := "INSERT INTO RepoFileCount (repo_id,file_count) VALUES (?,?)" _, err = trans.ExecContext(ctx, sqlStr, repoID, fileCount) if err != nil { trans.Rollback() return err } } trans.Commit() return nil } type RepoSizeChangeTask struct { RepoID string `json:"repo_id"` } func notifyRepoSizeChange(repoID string) error { if !option.HasRedisOptions { return nil } task := &RepoSizeChangeTask{RepoID: repoID} data, err := json.Marshal(task) if err != nil { return fmt.Errorf("failed to encode repo size change task: %w", err) } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() err = redisClient.LPush(ctx, RepoSizeList, data).Err() if err != nil { return fmt.Errorf("failed to push message to redis list %s: %w", RepoSizeList, err) } return nil } // RepoInfo contains repo information. type RepoInfo struct { HeadID string Size int64 FileCount int64 } func getOldRepoInfo(repoID string) (*RepoInfo, error) { sqlStr := "select s.head_id,s.size,f.file_count FROM RepoSize s LEFT JOIN RepoFileCount f ON " + "s.repo_id=f.repo_id WHERE s.repo_id=?" 
repoInfo := new(RepoInfo) ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() row := seafileDB.QueryRowContext(ctx, sqlStr, repoID) if err := row.Scan(&repoInfo.HeadID, &repoInfo.Size, &repoInfo.FileCount); err != nil { if err != sql.ErrNoRows { return nil, err } return nil, nil } return repoInfo, nil } ================================================ FILE: fileserver/sync_api.go ================================================ package main import ( "bytes" "context" "database/sql" "encoding/binary" "encoding/json" "errors" "fmt" "html" "io" "net" "net/http" "strconv" "strings" "sync" "time" "github.com/gorilla/mux" "github.com/haiwen/seafile-server/fileserver/blockmgr" "github.com/haiwen/seafile-server/fileserver/commitmgr" "github.com/haiwen/seafile-server/fileserver/diff" "github.com/haiwen/seafile-server/fileserver/fsmgr" "github.com/haiwen/seafile-server/fileserver/option" "github.com/haiwen/seafile-server/fileserver/repomgr" "github.com/haiwen/seafile-server/fileserver/share" "github.com/haiwen/seafile-server/fileserver/utils" "github.com/haiwen/seafile-server/fileserver/workerpool" log "github.com/sirupsen/logrus" ) type checkExistType int32 const ( checkFSExist checkExistType = 0 checkBlockExist checkExistType = 1 ) const ( seafileServerChannelEvent = "seaf_server.event" seafileServerChannelStats = "seaf_server.stats" emptySHA1 = "0000000000000000000000000000000000000000" tokenExpireTime = 7200 permExpireTime = 7200 virtualRepoExpireTime = 7200 syncAPICleaningIntervalSec = 300 maxObjectPackSize = 1 << 20 // 1MB fsIdWorkers = 10 ) var ( tokenCache sync.Map permCache sync.Map virtualRepoInfoCache sync.Map calFsIdPool *workerpool.WorkPool ) type tokenInfo struct { repoID string email string expireTime int64 } type permInfo struct { perm string expireTime int64 } type virtualRepoInfo struct { storeID string expireTime int64 } type repoEventData struct { eType string user string ip string repoID string path string 
clientName string
}

// statsEventData describes a traffic statistics event published to the
// seaf_server.stats channel.
type statsEventData struct {
    eType  string
    user   string
    repoID string
    bytes  uint64
}

// syncAPIInit starts the periodic cache cleaner and the worker pool that
// computes fs object id lists.
func syncAPIInit() {
    ticker := time.NewTicker(time.Second * syncAPICleaningIntervalSec)
    go RecoverWrapper(func() {
        for range ticker.C {
            removeSyncAPIExpireCache()
        }
    })
    calFsIdPool = workerpool.CreateWorkerPool(getFsId, fsIdWorkers)
}

// calResult carries the outcome of a getFsId task back to the HTTP handler.
type calResult struct {
    user string
    err  *appError
}

// getFsId is the worker-pool task behind getFsObjIDCB. It validates the
// request, computes the fs object list between client and server heads and
// writes the JSON response. args: chan *calResult, http.ResponseWriter,
// *http.Request. The result (user + appError) is always sent on the channel.
func getFsId(args ...interface{}) error {
    if len(args) < 3 {
        return nil
    }
    resChan := args[0].(chan *calResult)
    rsp := args[1].(http.ResponseWriter)
    r := args[2].(*http.Request)
    queries := r.URL.Query()
    serverHead := queries.Get("server-head")
    if !utils.IsObjectIDValid(serverHead) {
        msg := "Invalid server-head parameter."
        appErr := &appError{nil, msg, http.StatusBadRequest}
        resChan <- &calResult{"", appErr}
        return nil
    }
    clientHead := queries.Get("client-head")
    if clientHead != "" && !utils.IsObjectIDValid(clientHead) {
        msg := "Invalid client-head parameter."
        appErr := &appError{nil, msg, http.StatusBadRequest}
        resChan <- &calResult{"", appErr}
        return nil
    }
    dirOnlyArg := queries.Get("dir-only")
    var dirOnly bool
    // Note: any non-empty dir-only value enables the flag; the value itself
    // is not inspected.
    if dirOnlyArg != "" {
        dirOnly = true
    }
    vars := mux.Vars(r)
    repoID := vars["repoid"]
    user, appErr := validateToken(r, repoID, false)
    if appErr != nil {
        resChan <- &calResult{user, appErr}
        return nil
    }
    appErr = checkPermission(repoID, user, "download", false)
    if appErr != nil {
        resChan <- &calResult{user, appErr}
        return nil
    }
    repo := repomgr.Get(repoID)
    if repo == nil {
        err := fmt.Errorf("Failed to find repo %.8s", repoID)
        appErr := &appError{err, "", http.StatusInternalServerError}
        resChan <- &calResult{user, appErr}
        return nil
    }
    ret, err := calculateSendObjectList(r.Context(), repo, serverHead, clientHead, dirOnly)
    if err != nil {
        if !errors.Is(err, context.Canceled) {
            err := fmt.Errorf("Failed to get fs id list: %w", err)
            appErr := &appError{err, "", http.StatusInternalServerError}
            resChan <- &calResult{user, appErr}
            return nil
        }
        // Request was canceled by the client; report without logging detail.
        appErr := &appError{nil, "", http.StatusInternalServerError}
        resChan <- &calResult{user, appErr}
        return nil
    }
    var objList []byte
    if ret != nil {
        objList, err = json.Marshal(ret)
        if err != nil {
            appErr := &appError{err, "", http.StatusInternalServerError}
            resChan <- &calResult{user, appErr}
            return nil
        }
    } else {
        // when get obj list is nil, return []
        objList = []byte{'[', ']'}
    }
    rsp.Header().Set("Content-Length", strconv.Itoa(len(objList)))
    rsp.WriteHeader(http.StatusOK)
    rsp.Write(objList)
    resChan <- &calResult{user, nil}
    return nil
}

// permissionCheckCB handles the sync client's permission probe: validates
// op/client parameters and the repo token, records client peer info and
// emits a repo-download-sync event for downloads.
func permissionCheckCB(rsp http.ResponseWriter, r *http.Request) *appError {
    queries := r.URL.Query()
    op := queries.Get("op")
    if op != "download" && op != "upload" {
        msg := "op is invalid"
        return &appError{nil, msg, http.StatusBadRequest}
    }
    clientID := queries.Get("client_id")
    if clientID != "" && len(clientID) != 40 {
        msg := "client_id is invalid"
        return &appError{nil, msg, http.StatusBadRequest}
    }
    clientVer := queries.Get("client_ver")
    if clientVer != "" {
        status := validateClientVer(clientVer)
        if status != http.StatusOK {
            msg := "client_ver is invalid"
            return &appError{nil, msg, status}
        }
    }
    clientName := queries.Get("client_name")
    if clientName != "" {
        clientName = html.UnescapeString(clientName)
    }
    vars := mux.Vars(r)
    repoID := vars["repoid"]
    repo := repomgr.GetEx(repoID)
    if repo == nil {
        msg := "repo was deleted"
        return &appError{nil, msg, seafHTTPResRepoDeleted}
    }
    if repo.IsCorrupted {
        msg := "repo was corrupted"
        return &appError{nil, msg, seafHTTPResRepoCorrupted}
    }
    user, err := validateToken(r, repoID, true)
    if err != nil {
        return err
    }
    err = checkPermission(repoID, user, op, true)
    if err != nil {
        return err
    }
    ip := getClientIPAddr(r)
    if ip == "" {
        token := r.Header.Get("Seafile-Repo-Token")
        err := fmt.Errorf("%s failed to get client ip", token)
        return &appError{err, "", http.StatusInternalServerError}
    }
    if op == "download" {
        onRepoOper("repo-download-sync", repoID, user, ip, clientName)
    }
    if clientID != "" && clientName != "" {
        token := r.Header.Get("Seafile-Repo-Token")
        exists, err := repomgr.TokenPeerInfoExists(token)
        if err != nil {
            err := fmt.Errorf("Failed to check whether token %s peer info exist: %v", token, err)
            return &appError{err, "", http.StatusInternalServerError}
        }
        if !exists {
            if err := repomgr.AddTokenPeerInfo(token, clientID, ip, clientName, clientVer, int64(time.Now().Unix())); err != nil {
                err := fmt.Errorf("Failed to add token peer info: %v", err)
                return &appError{err, "", http.StatusInternalServerError}
            }
        } else {
            if err := repomgr.UpdateTokenPeerInfo(token, clientID, clientVer, int64(time.Now().Unix())); err != nil {
                err := fmt.Errorf("Failed to update token peer info: %v", err)
                return &appError{err, "", http.StatusInternalServerError}
            }
        }
    }
    return nil
}

// getBlockMapCB returns, as a JSON array, the size of every block of the
// given file object.
func getBlockMapCB(rsp http.ResponseWriter, r *http.Request) *appError {
    vars := mux.Vars(r)
    repoID := vars["repoid"]
    fileID := vars["id"]
    user, appErr := validateToken(r, repoID, false)
    if appErr != nil {
        return appErr
    }
    appErr = checkPermission(repoID, user, "download", false)
    if appErr != nil {
        return appErr
    }
    storeID, err := getRepoStoreID(repoID)
    if err != nil {
        err := fmt.Errorf("Failed to get repo store id by repo id %s: %v", repoID, err)
        return &appError{err, "", http.StatusInternalServerError}
    }
    seafile, err := fsmgr.GetSeafile(storeID, fileID)
    if err != nil {
        msg := fmt.Sprintf("Failed to get seafile object by file id %s: %v", fileID, err)
        return &appError{nil, msg, http.StatusNotFound}
    }
    var blockSizes []int64
    for _, blockID := range seafile.BlkIDs {
        blockSize, err := blockmgr.Stat(storeID, blockID)
        if err != nil {
            err := fmt.Errorf("Failed to find block %s/%s", storeID, blockID)
            return &appError{err, "", http.StatusInternalServerError}
        }
        blockSizes = append(blockSizes, blockSize)
    }
    var data []byte
    if blockSizes != nil {
        data, err = json.Marshal(blockSizes)
        if err != nil {
            err := fmt.Errorf("Failed to marshal json: %v", err)
            return &appError{err, "", http.StatusInternalServerError}
        }
    } else {
        data = []byte{'[', ']'}
    }
    rsp.Header().Set("Content-Length", strconv.Itoa(len(data)))
    rsp.WriteHeader(http.StatusOK)
    rsp.Write(data)
    return nil
}

// getAccessibleRepoListCB returns every repo the authenticated user can
// access: owned repos, repos shared to the user, group repos and inner
// public repos, deduplicated in that priority order.
func getAccessibleRepoListCB(rsp http.ResponseWriter, r *http.Request) *appError {
    queries := r.URL.Query()
    repoID := queries.Get("repo_id")
    if repoID == "" || !utils.IsValidUUID(repoID) {
        msg := "Invalid repo id."
        return &appError{nil, msg, http.StatusBadRequest}
    }
    user, appErr := validateToken(r, repoID, false)
    if appErr != nil {
        return appErr
    }
    obtainedRepos := make(map[string]string)
    repos, err := share.GetReposByOwner(user)
    if err != nil {
        err := fmt.Errorf("Failed to get repos by owner %s: %v", user, err)
        return &appError{err, "", http.StatusInternalServerError}
    }
    var repoObjects []*share.SharedRepo
    for _, repo := range repos {
        if repo.RepoType != "" {
            continue
        }
        if _, ok := obtainedRepos[repo.ID]; !ok {
            obtainedRepos[repo.ID] = repo.ID
        }
        repo.Permission = "rw"
        repo.Type = "repo"
        repo.Owner = user
        repoObjects = append(repoObjects, repo)
    }
    repos, err = share.ListShareRepos(user, "to_email")
    if err != nil {
        err := fmt.Errorf("Failed to get share repos by user %s: %v", user, err)
        return &appError{err, "", http.StatusInternalServerError}
    }
    for _, sRepo := range repos {
        if _, ok := obtainedRepos[sRepo.ID]; ok {
            continue
        }
        if sRepo.RepoType != "" {
            continue
        }
        sRepo.Type = "srepo"
        sRepo.Owner = strings.ToLower(sRepo.Owner)
        repoObjects = append(repoObjects, sRepo)
    }
    repos, err = share.GetGroupReposByUser(user, -1)
    if err != nil {
        err := fmt.Errorf("Failed to get group repos by user %s: %v", user, err)
        return &appError{err, "", http.StatusInternalServerError}
    }
    reposTable := filterGroupRepos(repos)
    for _, gRepo := range reposTable {
        if _, ok := obtainedRepos[gRepo.ID]; ok {
            continue
        }
        gRepo.Type = "grepo"
        gRepo.Owner = strings.ToLower(gRepo.Owner)
        repoObjects = append(repoObjects, gRepo)
    }
    repos, err = share.ListInnerPubRepos()
    if err != nil {
        err := fmt.Errorf("Failed to get inner public repos: %v", err)
        return &appError{err, "", http.StatusInternalServerError}
    }
    for _, sRepo := range repos {
        if _, ok := obtainedRepos[sRepo.ID]; ok {
            continue
        }
        if sRepo.RepoType != "" {
            continue
        }
        sRepo.Type = "grepo"
        sRepo.Owner = "Organization"
        repoObjects = append(repoObjects, sRepo)
    }
    var data []byte
    if repoObjects != nil {
        data, err = json.Marshal(repoObjects)
        if err != nil {
            err := fmt.Errorf("Failed to marshal json: %v", err)
            return &appError{err, "", http.StatusInternalServerError}
        }
    } else {
        data = []byte{'[', ']'}
    }
    rsp.Header().Set("Content-Length", strconv.Itoa(len(data)))
    rsp.WriteHeader(http.StatusOK)
    rsp.Write(data)
    return nil
}

// filterGroupRepos deduplicates group repos by repo id, keeping the "rw"
// entry over an "r" entry when the same repo is shared to several groups.
func filterGroupRepos(repos []*share.SharedRepo) map[string]*share.SharedRepo {
    table := make(map[string]*share.SharedRepo)
    for _, repo := range repos {
        if repo.RepoType != "" {
            continue
        }
        if repoPrev, ok := table[repo.ID]; ok {
            if repo.Permission == "rw" && repoPrev.Permission == "r" {
                table[repo.ID] = repo
            }
        } else {
            table[repo.ID] = repo
        }
    }
    return table
}

// recvFSCB receives a packed batch of fs objects from the client. Each
// record is a 40-byte object id, a 4-byte big-endian size, then the object
// body of that size.
func recvFSCB(rsp http.ResponseWriter, r *http.Request) *appError {
    vars := mux.Vars(r)
    repoID := vars["repoid"]
    user, appErr := validateToken(r, repoID, false)
    if appErr != nil {
        return appErr
    }
    appErr = checkPermission(repoID, user, "upload", false)
    if appErr != nil {
        return appErr
    }
    storeID, err := getRepoStoreID(repoID)
    if err != nil {
        err := fmt.Errorf("Failed to get repo store id by repo id %s: %v", repoID, err)
        return &appError{err, "", http.StatusInternalServerError}
    }
    fsBuf, err := io.ReadAll(r.Body)
    if err != nil {
        return &appError{nil, err.Error(), http.StatusBadRequest}
    }
    for len(fsBuf) > 44 {
        objID := string(fsBuf[:40])
        if !utils.IsObjectIDValid(objID) {
            msg := fmt.Sprintf("Fs obj id %s is invalid", objID)
            return &appError{nil, msg, http.StatusBadRequest}
        }
        var objSize uint32
        sizeBuffer := bytes.NewBuffer(fsBuf[40:44])
        if err := binary.Read(sizeBuffer, binary.BigEndian, &objSize); err != nil {
            msg := fmt.Sprintf("Failed to read fs obj size: %v", err)
            return &appError{nil, msg, http.StatusBadRequest}
}
        if len(fsBuf) < int(44+objSize) {
            msg := "Request body size invalid"
            return &appError{nil, msg, http.StatusBadRequest}
        }
        objBuffer := bytes.NewBuffer(fsBuf[44 : 44+objSize])
        if err := fsmgr.WriteRaw(storeID, objID, objBuffer); err != nil {
            err := fmt.Errorf("Failed to write fs obj %s:%s : %v", storeID, objID, err)
            return &appError{err, "", http.StatusInternalServerError}
        }
        fsBuf = fsBuf[44+objSize:]
    }
    // A fully consumed buffer means every record parsed cleanly; any
    // leftover bytes indicate a malformed request body.
    if len(fsBuf) == 0 {
        rsp.WriteHeader(http.StatusOK)
        return nil
    }
    msg := "Request body size invalid"
    return &appError{nil, msg, http.StatusBadRequest}
}

// checkFSCB reports which of the posted fs object ids are missing.
func checkFSCB(rsp http.ResponseWriter, r *http.Request) *appError {
    return postCheckExistCB(rsp, r, checkFSExist)
}

// checkBlockCB reports which of the posted block ids are missing.
func checkBlockCB(rsp http.ResponseWriter, r *http.Request) *appError {
    return postCheckExistCB(rsp, r, checkBlockExist)
}

// postCheckExistCB takes a JSON array of object ids and responds with the
// JSON array of ids that do NOT yet exist in the store (fs objects or
// blocks, per existType). Invalid ids are silently skipped.
func postCheckExistCB(rsp http.ResponseWriter, r *http.Request, existType checkExistType) *appError {
    vars := mux.Vars(r)
    repoID := vars["repoid"]
    user, appErr := validateToken(r, repoID, false)
    if appErr != nil {
        return appErr
    }
    appErr = checkPermission(repoID, user, "download", false)
    if appErr != nil {
        return appErr
    }
    storeID, err := getRepoStoreID(repoID)
    if err != nil {
        err := fmt.Errorf("Failed to get repo store id by repo id %s: %v", repoID, err)
        return &appError{err, "", http.StatusInternalServerError}
    }
    var objIDList []string
    if err := json.NewDecoder(r.Body).Decode(&objIDList); err != nil {
        return &appError{nil, err.Error(), http.StatusBadRequest}
    }
    var neededObjs []string
    var ret bool
    for i := 0; i < len(objIDList); i++ {
        if !utils.IsObjectIDValid(objIDList[i]) {
            continue
        }
        if existType == checkFSExist {
            ret, _ = fsmgr.Exists(storeID, objIDList[i])
        } else if existType == checkBlockExist {
            ret = blockmgr.Exists(storeID, objIDList[i])
        }
        if !ret {
            neededObjs = append(neededObjs, objIDList[i])
        }
    }
    var data []byte
    if neededObjs != nil {
        data, err = json.Marshal(neededObjs)
        if err != nil {
            err := fmt.Errorf("Failed to marshal json: %v", err)
            return &appError{err, "", http.StatusInternalServerError}
        }
    } else {
        data = []byte{'[', ']'}
    }
    rsp.Header().Set("Content-Length", strconv.Itoa(len(data)))
    rsp.WriteHeader(http.StatusOK)
    rsp.Write(data)
    return nil
}

// packFSCB streams the requested fs objects back in the packed format
// (40-byte id, 4-byte big-endian length, body), stopping once the pack
// reaches maxObjectPackSize.
func packFSCB(rsp http.ResponseWriter, r *http.Request) *appError {
    vars := mux.Vars(r)
    repoID := vars["repoid"]
    user, appErr := validateToken(r, repoID, false)
    if appErr != nil {
        return appErr
    }
    appErr = checkPermission(repoID, user, "download", false)
    if appErr != nil {
        return appErr
    }
    storeID, err := getRepoStoreID(repoID)
    if err != nil {
        err := fmt.Errorf("Failed to get repo store id by repo id %s: %v", repoID, err)
        return &appError{err, "", http.StatusInternalServerError}
    }
    var fsIDList []string
    if err := json.NewDecoder(r.Body).Decode(&fsIDList); err != nil {
        return &appError{nil, err.Error(), http.StatusBadRequest}
    }
    var totalSize int
    var data bytes.Buffer
    for i := 0; i < len(fsIDList); i++ {
        if !utils.IsObjectIDValid(fsIDList[i]) {
            msg := fmt.Sprintf("Invalid fs id %s", fsIDList[i])
            return &appError{nil, msg, http.StatusBadRequest}
        }
        data.WriteString(fsIDList[i])
        var tmp bytes.Buffer
        if err := fsmgr.ReadRaw(storeID, fsIDList[i], &tmp); err != nil {
            err := fmt.Errorf("Failed to read fs %s:%s: %v", storeID, fsIDList[i], err)
            return &appError{err, "", http.StatusInternalServerError}
        }
        tmpLen := make([]byte, 4)
        binary.BigEndian.PutUint32(tmpLen, uint32(tmp.Len()))
        data.Write(tmpLen)
        data.Write(tmp.Bytes())
        totalSize += tmp.Len()
        // Cap the response; the client re-requests the rest.
        if totalSize >= maxObjectPackSize {
            break
        }
    }
    rsp.Header().Set("Content-Length", strconv.Itoa(data.Len()))
    rsp.WriteHeader(http.StatusOK)
    rsp.Write(data.Bytes())
    return nil
}

// headCommitsMultiCB returns the master-branch head commit id for each of
// the posted repo ids as a JSON object. Ids are validated as UUIDs before
// being interpolated into the IN (...) list, so the query is injection-safe.
func headCommitsMultiCB(rsp http.ResponseWriter, r *http.Request) *appError {
    var repoIDList []string
    if err := json.NewDecoder(r.Body).Decode(&repoIDList); err != nil {
        return &appError{err, "", http.StatusBadRequest}
    }
    if len(repoIDList) == 0 {
        return &appError{nil, "", http.StatusBadRequest}
    }
    var repoIDs strings.Builder
    for i := 0; i < len(repoIDList); i++ {
        if !utils.IsValidUUID(repoIDList[i]) {
            return &appError{nil, "", http.StatusBadRequest}
        }
        if i == 0 {
            repoIDs.WriteString(fmt.Sprintf("'%s'", repoIDList[i]))
        } else {
            repoIDs.WriteString(fmt.Sprintf(",'%s'", repoIDList[i]))
        }
    }
    // NOTE(review): "LOCK IN SHARE MODE" is MySQL-specific syntax.
    sqlStr := fmt.Sprintf(
        "SELECT repo_id, commit_id FROM Branch WHERE name='master' AND "+
            "repo_id IN (%s) LOCK IN SHARE MODE", repoIDs.String())
    ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
    defer cancel()
    rows, err := seafileDB.QueryContext(ctx, sqlStr)
    if err != nil {
        err := fmt.Errorf("Failed to get commit id: %v", err)
        return &appError{err, "", http.StatusInternalServerError}
    }
    defer rows.Close()
    commitIDMap := make(map[string]string)
    var repoID string
    var commitID string
    for rows.Next() {
        if err := rows.Scan(&repoID, &commitID); err == nil {
            commitIDMap[repoID] = commitID
        }
    }
    if err := rows.Err(); err != nil {
        err := fmt.Errorf("Failed to get commit id: %v", err)
        return &appError{err, "", http.StatusInternalServerError}
    }
    data, err := json.Marshal(commitIDMap)
    if err != nil {
        err := fmt.Errorf("Failed to marshal json: %v", err)
        return &appError{err, "", http.StatusInternalServerError}
    }
    rsp.Header().Set("Content-Length", strconv.Itoa(len(data)))
    rsp.WriteHeader(http.StatusOK)
    rsp.Write(data)
    return nil
}

// getCheckQuotaCB checks whether adding "delta" bytes would push the repo
// over quota, answering seafHTTPResNoQuota when it would.
func getCheckQuotaCB(rsp http.ResponseWriter, r *http.Request) *appError {
    vars := mux.Vars(r)
    repoID := vars["repoid"]
    if _, err := validateToken(r, repoID, false); err != nil {
        return err
    }
    queries := r.URL.Query()
    delta := queries.Get("delta")
    if delta == "" {
        msg := "Invalid delta parameter"
        return &appError{nil, msg, http.StatusBadRequest}
    }
    deltaNum, err := strconv.ParseInt(delta, 10, 64)
    if err != nil {
        msg := "Invalid delta parameter"
        return &appError{nil, msg, http.StatusBadRequest}
    }
    ret, err := checkQuota(repoID, deltaNum)
    if err != nil {
        msg := "Internal error.\n"
        err := fmt.Errorf("failed to check quota: %v", err)
        return &appError{err, msg, http.StatusInternalServerError}
    }
    if ret == 1 {
        msg := "Out of quota.\n"
        return &appError{nil, msg, seafHTTPResNoQuota}
    }
    return nil
}

// getJWTTokenCB issues a 72-hour notification JWT for the repo/user when
// the notification server is enabled.
func getJWTTokenCB(rsp http.ResponseWriter, r *http.Request) *appError {
    vars := mux.Vars(r)
    repoID := vars["repoid"]
    if !option.EnableNotification {
        return &appError{nil, "", http.StatusNotFound}
    }
    user, appErr := validateToken(r, repoID, false)
    if appErr != nil {
        return appErr
    }
    exp := time.Now().Add(time.Hour * 72).Unix()
    tokenString, err := utils.GenNotifJWTToken(repoID, user, exp)
    if err != nil {
        return &appError{err, "", http.StatusInternalServerError}
    }
    data := fmt.Sprintf("{\"jwt_token\":\"%s\"}", tokenString)
    rsp.Write([]byte(data))
    return nil
}

// getFsObjIDCB hands the request to the fs-id worker pool and blocks until
// the computed result (or error) comes back on the channel.
func getFsObjIDCB(rsp http.ResponseWriter, r *http.Request) *appError {
    recvChan := make(chan *calResult)
    calFsIdPool.AddTask(recvChan, rsp, r)
    result := <-recvChan
    return result.err
}

// headCommitOperCB dispatches GET to getHeadCommit and PUT to
// putUpdateBranchCB.
func headCommitOperCB(rsp http.ResponseWriter, r *http.Request) *appError {
    if r.Method == http.MethodGet {
        return getHeadCommit(rsp, r)
    } else if r.Method == http.MethodPut {
        return putUpdateBranchCB(rsp, r)
    }
    return &appError{nil, "", http.StatusBadRequest}
}

// commitOperCB dispatches GET to getCommitInfo and PUT to putCommitCB.
func commitOperCB(rsp http.ResponseWriter, r *http.Request) *appError {
    if r.Method == http.MethodGet {
        return getCommitInfo(rsp, r)
    } else if r.Method == http.MethodPut {
        return putCommitCB(rsp, r)
    }
    return &appError{nil, "", http.StatusBadRequest}
}

// blockOperCB dispatches GET to getBlockInfo and PUT to putSendBlockCB.
func blockOperCB(rsp http.ResponseWriter, r *http.Request) *appError {
    if r.Method == http.MethodGet {
        return getBlockInfo(rsp, r)
    } else if r.Method == http.MethodPut {
        return putSendBlockCB(rsp, r)
    }
    return &appError{nil, "", http.StatusBadRequest}
}

// putSendBlockCB stores one uploaded block and records upload traffic stats.
func putSendBlockCB(rsp http.ResponseWriter, r *http.Request) *appError {
    vars := mux.Vars(r)
    repoID := vars["repoid"]
    blockID := vars["id"]
    user, appErr := validateToken(r, repoID, false)
    if appErr != nil {
        return appErr
    }
    appErr = checkPermission(repoID, user, "upload", false)
    if appErr != nil {
        return appErr
    }
    storeID, err := getRepoStoreID(repoID)
    if err != nil {
        err := fmt.Errorf("Failed to get 
repo store id by repo id %s: %v", repoID, err) return &appError{err, "", http.StatusInternalServerError} } if err := blockmgr.Write(storeID, blockID, r.Body); err != nil { err := fmt.Errorf("Failed to write block %.8s:%s: %v", storeID, blockID, err) return &appError{err, "", http.StatusInternalServerError} } sendStatisticMsg(storeID, user, "sync-file-upload", uint64(r.ContentLength)) return nil } func getBlockInfo(rsp http.ResponseWriter, r *http.Request) *appError { vars := mux.Vars(r) repoID := vars["repoid"] blockID := vars["id"] user, appErr := validateToken(r, repoID, false) if appErr != nil { return appErr } appErr = checkPermission(repoID, user, "download", false) if appErr != nil { return appErr } storeID, err := getRepoStoreID(repoID) if err != nil { err := fmt.Errorf("Failed to get repo store id by repo id %s: %v", repoID, err) return &appError{err, "", http.StatusInternalServerError} } blockSize, err := blockmgr.Stat(storeID, blockID) if err != nil { return &appError{err, "", http.StatusInternalServerError} } if blockSize <= 0 { err := fmt.Errorf("block %.8s:%s size invalid", storeID, blockID) return &appError{err, "", http.StatusInternalServerError} } blockLen := fmt.Sprintf("%d", blockSize) rsp.Header().Set("Content-Length", blockLen) if err := blockmgr.Read(storeID, blockID, rsp); err != nil { if !isNetworkErr(err) { log.Errorf("failed to read block %s: %v", blockID, err) } return nil } sendStatisticMsg(storeID, user, "sync-file-download", uint64(blockSize)) return nil } func getRepoStoreID(repoID string) (string, error) { var storeID string if value, ok := virtualRepoInfoCache.Load(repoID); ok { if info, ok := value.(*virtualRepoInfo); ok { if info.storeID != "" { storeID = info.storeID } else { storeID = repoID } info.expireTime = time.Now().Unix() + virtualRepoExpireTime } } if storeID != "" { return storeID, nil } var vInfo virtualRepoInfo var rID, originRepoID sql.NullString sqlStr := "SELECT repo_id, origin_repo FROM VirtualRepo where repo_id = 
?" ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout) defer cancel() row := seafileDB.QueryRowContext(ctx, sqlStr, repoID) if err := row.Scan(&rID, &originRepoID); err != nil { if err == sql.ErrNoRows { vInfo.storeID = repoID vInfo.expireTime = time.Now().Unix() + virtualRepoExpireTime virtualRepoInfoCache.Store(repoID, &vInfo) return repoID, nil } return "", err } if !rID.Valid || !originRepoID.Valid { return "", nil } vInfo.storeID = originRepoID.String vInfo.expireTime = time.Now().Unix() + virtualRepoExpireTime virtualRepoInfoCache.Store(repoID, &vInfo) return originRepoID.String, nil } func sendStatisticMsg(repoID, user, operation string, bytes uint64) { rData := &statsEventData{operation, user, repoID, bytes} publishStatsEvent(rData) } func publishStatsEvent(rData *statsEventData) { data := make(map[string]interface{}) data["msg_type"] = rData.eType data["user_name"] = rData.user data["repo_id"] = rData.repoID data["bytes"] = rData.bytes jsonData, err := json.Marshal(data) if err != nil { log.Warnf("Failed to publish event: %v", err) return } if _, err := rpcclient.Call("publish_event", seafileServerChannelStats, string(jsonData)); err != nil { log.Warnf("Failed to publish event: %v", err) } } func saveLastGCID(repoID, token string) error { repo := repomgr.Get(repoID) if repo == nil { return fmt.Errorf("failed to get repo: %s", repoID) } gcID, err := repomgr.GetCurrentGCID(repo.StoreID) if err != nil { return err } return repomgr.SetLastGCID(repoID, token, gcID) } func putCommitCB(rsp http.ResponseWriter, r *http.Request) *appError { vars := mux.Vars(r) repoID := vars["repoid"] commitID := vars["id"] user, appErr := validateToken(r, repoID, false) if appErr != nil { return appErr } appErr = checkPermission(repoID, user, "upload", true) if appErr != nil { return appErr } data, err := io.ReadAll(r.Body) if err != nil { return &appError{nil, err.Error(), http.StatusBadRequest} } commit := new(commitmgr.Commit) if err := 
commit.FromData(data); err != nil {
		return &appError{nil, err.Error(), http.StatusBadRequest}
	}
	// The uploaded commit must belong to the repo named in the URL.
	if commit.RepoID != repoID {
		msg := "The repo id in commit does not match current repo id"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	if err := commitmgr.Save(commit); err != nil {
		err := fmt.Errorf("Failed to add commit %s: %v", commitID, err)
		return &appError{err, "", http.StatusInternalServerError}
	} else {
		// Record the GC id that was current at upload time so a concurrent
		// GC run can be detected later (see saveLastGCID).
		token := r.Header.Get("Seafile-Repo-Token")
		if token == "" {
			token = utils.GetAuthorizationToken(r.Header)
		}
		if err := saveLastGCID(repoID, token); err != nil {
			err := fmt.Errorf("Failed to save gc id: %v", err)
			return &appError{err, "", http.StatusInternalServerError}
		}
	}
	return nil
}

// getCommitInfo serves the raw commit object {repoid}/{id}. It validates the
// sync token, checks download permission, and streams the commit's raw bytes
// to the client with an explicit Content-Length.
func getCommitInfo(rsp http.ResponseWriter, r *http.Request) *appError {
	vars := mux.Vars(r)
	repoID := vars["repoid"]
	commitID := vars["id"]
	user, appErr := validateToken(r, repoID, false)
	if appErr != nil {
		return appErr
	}
	appErr = checkPermission(repoID, user, "download", false)
	if appErr != nil {
		return appErr
	}
	if exists, _ := commitmgr.Exists(repoID, commitID); !exists {
		return &appError{nil, "", http.StatusNotFound}
	}
	var data bytes.Buffer
	err := commitmgr.ReadRaw(repoID, commitID, &data)
	if err != nil {
		err := fmt.Errorf("Failed to read commit %s:%s: %v", repoID, commitID, err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	dataLen := strconv.Itoa(data.Len())
	rsp.Header().Set("Content-Length", dataLen)
	rsp.WriteHeader(http.StatusOK)
	rsp.Write(data.Bytes())
	return nil
}

// putUpdateBranchCB moves the master branch of {repoid} to the commit given
// in the "head" query parameter, after validating the token, the upload
// permission, quota, path names, and (optionally) block completeness.
func putUpdateBranchCB(rsp http.ResponseWriter, r *http.Request) *appError {
	queries := r.URL.Query()
	newCommitID := queries.Get("head")
	if newCommitID == "" || !utils.IsObjectIDValid(newCommitID) {
		msg := fmt.Sprintf("commit id %s is invalid", newCommitID)
		return &appError{nil, msg, http.StatusBadRequest}
	}
	vars := mux.Vars(r)
	repoID := vars["repoid"]
	user, appErr := validateToken(r, repoID, false)
	if appErr != nil {
		return appErr
	}
	appErr = checkPermission(repoID, user, "upload", false)
if appErr != nil && appErr.Code == http.StatusForbidden { return appErr } repo := repomgr.Get(repoID) if repo == nil { err := fmt.Errorf("Repo %s is missing or corrupted", repoID) return &appError{err, "", http.StatusInternalServerError} } newCommit, err := commitmgr.Load(repoID, newCommitID) if err != nil { err := fmt.Errorf("Failed to get commit %s for repo %s", newCommitID, repoID) return &appError{err, "", http.StatusInternalServerError} } base, err := commitmgr.Load(repoID, newCommit.ParentID.String) if err != nil { err := fmt.Errorf("Failed to get commit %s for repo %s", newCommit.ParentID.String, repoID) return &appError{err, "", http.StatusInternalServerError} } if includeInvalidPath(base, newCommit) { msg := "Dir or file name is .." return &appError{nil, msg, http.StatusBadRequest} } ret, err := checkQuota(repoID, 0) if err != nil { err := fmt.Errorf("Failed to check quota: %v", err) return &appError{err, "", http.StatusInternalServerError} } if ret == 1 { msg := "Out of quota.\n" return &appError{nil, msg, seafHTTPResNoQuota} } if option.VerifyClientBlocks { if body, err := checkBlocks(r.Context(), repo, base, newCommit); err != nil { return &appError{nil, body, seafHTTPResBlockMissing} } } token := r.Header.Get("Seafile-Repo-Token") if token == "" { token = utils.GetAuthorizationToken(r.Header) } if err := fastForwardOrMerge(user, token, repo, base, newCommit); err != nil { if errors.Is(err, ErrGCConflict) { return &appError{nil, "GC Conflict.\n", http.StatusConflict} } else { err := fmt.Errorf("Fast forward merge for repo %s is failed: %v", repoID, err) return &appError{err, "", http.StatusInternalServerError} } } go mergeVirtualRepoPool.AddTask(repoID, "") go updateSizePool.AddTask(repoID) rsp.WriteHeader(http.StatusOK) return nil } type checkBlockAux struct { storeID string version int fileList []string } func checkBlocks(ctx context.Context, repo *repomgr.Repo, base, remote *commitmgr.Commit) (string, error) { aux := new(checkBlockAux) aux.storeID = 
repo.StoreID
	aux.version = repo.Version

	// Diff base vs remote tree; checkFileBlocks collects the names of files
	// whose blocks are missing from the block store into aux.fileList.
	opt := &diff.DiffOptions{
		FileCB: checkFileBlocks,
		DirCB:  checkDirCB,
		Ctx:    ctx,
		RepoID: repo.StoreID}
	opt.Data = aux

	trees := []string{base.RootID, remote.RootID}
	if err := diff.DiffTrees(trees, opt); err != nil {
		return "", err
	}
	// No missing blocks found: empty body, nil error.
	if len(aux.fileList) == 0 {
		return "", nil
	}
	// Return the JSON-encoded list of affected file names together with an
	// error, so the caller can tell the client exactly which files are
	// incomplete.
	body, _ := json.Marshal(aux.fileList)
	return string(body), fmt.Errorf("block is missing")
}

// checkFileBlocks is the file callback for diff.DiffTrees in checkBlocks.
// files[0] is the entry from the base tree, files[1] from the remote (new)
// tree. If the remote file references any block that does not exist in the
// store, its name is appended to aux.fileList.
func checkFileBlocks(ctx context.Context, baseDir string, files []*fsmgr.SeafDirent, data interface{}) error {
	// Abort promptly if the request context was canceled.
	select {
	case <-ctx.Done():
		return context.Canceled
	default:
	}
	file1 := files[0]
	file2 := files[1]
	aux, ok := data.(*checkBlockAux)
	if !ok {
		err := fmt.Errorf("failed to assert results")
		return err
	}
	// Skip entries that are deleted, empty, or unchanged between the trees.
	if file2 == nil || file2.ID == emptySHA1 || (file1 != nil && file1.ID == file2.ID) {
		return nil
	}
	file, err := fsmgr.GetSeafile(aux.storeID, file2.ID)
	if err != nil {
		return err
	}
	for _, blkID := range file.BlkIDs {
		if !blockmgr.Exists(aux.storeID, blkID) {
			// One missing block is enough to flag this file; skip the rest.
			aux.fileList = append(aux.fileList, file2.Name)
			return nil
		}
	}
	return nil
}

// checkDirCB is the directory callback for diff.DiffTrees in checkBlocks.
// It controls, via *recurse, whether the diff descends into the directory
// pair dirs[0] (base tree) / dirs[1] (remote tree).
func checkDirCB(ctx context.Context, baseDir string, dirs []*fsmgr.SeafDirent, data interface{}, recurse *bool) error {
	// Abort promptly if the request context was canceled.
	select {
	case <-ctx.Done():
		return context.Canceled
	default:
	}
	dir1 := dirs[0]
	dir2 := dirs[1]
	if dir1 == nil {
		// if dir2 is empty, stop diff.
		if dir2.ID == diff.EmptySha1 {
			*recurse = false
		} else {
			*recurse = true
		}
		return nil
	}
	// if dir2 is not exist, stop diff.
	if dir2 == nil {
		*recurse = false
		return nil
	}
	// if dir1 and dir2 are the same or dir2 is empty, stop diff.
if dir1.ID == dir2.ID || dir2.ID == diff.EmptySha1 {
		*recurse = false
		return nil
	}
	return nil
}

// includeInvalidPath reports whether the diff between baseCommit and
// newCommit introduces any directory or file name that must be rejected
// (see shouldIgnore; the caller uses this to refuse names like "..").
// A diff failure is logged and treated as "no invalid path" so an internal
// error does not block the branch update.
func includeInvalidPath(baseCommit, newCommit *commitmgr.Commit) bool {
	var results []*diff.DiffEntry
	if err := diff.DiffCommits(baseCommit, newCommit, &results, true); err != nil {
		log.Infof("Failed to diff commits: %v", err)
		return false
	}
	for _, entry := range results {
		// Renamed entries carry the new name in NewName; for all other
		// entries the relevant name is Name.
		if entry.NewName != "" {
			if shouldIgnore(entry.NewName) {
				return true
			}
		} else {
			if shouldIgnore(entry.Name) {
				return true
			}
		}
	}
	return false
}

// getHeadCommit answers the sync client's "what is the current head of
// {repoid}?" query. DB errors are reported to the client as
// `{"is_corrupted": 1}` with HTTP 200, mirroring the success payload shape.
func getHeadCommit(rsp http.ResponseWriter, r *http.Request) *appError {
	vars := mux.Vars(r)
	repoID := vars["repoid"]
	sqlStr := "SELECT EXISTS(SELECT 1 FROM Repo WHERE repo_id=?)"
	var exists bool
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	row := seafileDB.QueryRowContext(ctx, sqlStr, repoID)
	if err := row.Scan(&exists); err != nil {
		if err != sql.ErrNoRows {
			log.Errorf("DB error when check repo %s existence: %v", repoID, err)
			msg := `{"is_corrupted": 1}`
			rsp.WriteHeader(http.StatusOK)
			rsp.Write([]byte(msg))
			return nil
		}
	}
	if !exists {
		return &appError{nil, "", seafHTTPResRepoDeleted}
	}
	if _, err := validateToken(r, repoID, false); err != nil {
		return err
	}
	var commitID string
	sqlStr = "SELECT commit_id FROM Branch WHERE name='master' AND repo_id=?"
row = seafileDB.QueryRowContext(ctx, sqlStr, repoID)
	if err := row.Scan(&commitID); err != nil {
		if err != sql.ErrNoRows {
			log.Errorf("DB error when get branch master: %v", err)
			msg := `{"is_corrupted": 1}`
			rsp.WriteHeader(http.StatusOK)
			rsp.Write([]byte(msg))
			return nil
		}
	}
	if commitID == "" {
		return &appError{nil, "", http.StatusBadRequest}
	}
	msg := fmt.Sprintf("{\"is_corrupted\": 0, \"head_commit_id\": \"%s\"}", commitID)
	rsp.WriteHeader(http.StatusOK)
	rsp.Write([]byte(msg))
	return nil
}

// checkPermission checks whether `user` may perform `op` ("upload" or
// "download") on repo `repoID`. A granted permission is cached in permCache
// under the key "repoID:user:op" for permExpireTime seconds; skipCache=true
// forces a fresh lookup. Returns nil when allowed, else a 403 *appError.
func checkPermission(repoID, user, op string, skipCache bool) *appError {
	var info *permInfo
	if !skipCache {
		if value, ok := permCache.Load(fmt.Sprintf("%s:%s:%s", repoID, user, op)); ok {
			info = value.(*permInfo)
		}
	}
	// A cache hit means this permission was granted recently.
	if info != nil {
		return nil
	}
	permCache.Delete(fmt.Sprintf("%s:%s:%s", repoID, user, op))
	if op == "upload" {
		// Uploads are refused when the repo status is not normal.
		// NOTE(review): -1 also passes — presumably "no status recorded";
		// confirm against repomgr.GetRepoStatus.
		status, err := repomgr.GetRepoStatus(repoID)
		if err != nil {
			msg := fmt.Sprintf("Failed to get repo status by repo id %s: %v", repoID, err)
			return &appError{nil, msg, http.StatusForbidden}
		}
		if status != repomgr.RepoStatusNormal && status != -1 {
			return &appError{nil, "", http.StatusForbidden}
		}
	}
	perm := share.CheckPerm(repoID, user)
	if perm != "" {
		// Read-only permission is not sufficient for uploads.
		if perm == "r" && op == "upload" {
			return &appError{nil, "", http.StatusForbidden}
		}
		info = new(permInfo)
		info.perm = perm
		info.expireTime = time.Now().Unix() + permExpireTime
		permCache.Store(fmt.Sprintf("%s:%s:%s", repoID, user, op), info)
		return nil
	}
	return &appError{nil, "", http.StatusForbidden}
}

// validateToken authenticates the request against repo `repoID` using the
// "Seafile-Repo-Token" header or, failing that, the Authorization header.
// On success it returns the email of the token owner. Successful lookups
// are cached in tokenCache for tokenExpireTime seconds unless skipCache.
func validateToken(r *http.Request, repoID string, skipCache bool) (string, *appError) {
	token := r.Header.Get("Seafile-Repo-Token")
	if token == "" {
		token = utils.GetAuthorizationToken(r.Header)
		if token == "" {
			msg := "token is null"
			return "", &appError{nil, msg, http.StatusBadRequest}
		}
	}
	if !skipCache {
		if value, ok := tokenCache.Load(token); ok {
			if info, ok := value.(*tokenInfo); ok {
				// A cached token is only valid for the repo it was issued for.
				if info.repoID != repoID {
					msg := "Invalid token"
					return "", &appError{nil, msg, http.StatusForbidden}
				}
				return info.email,
nil
			}
		}
	}
	email, err := repomgr.GetEmailByToken(repoID, token)
	if err != nil {
		log.Errorf("Failed to get email by token %s: %v", token, err)
		tokenCache.Delete(token)
		return email, &appError{err, "", http.StatusInternalServerError}
	}
	if email == "" {
		// Unknown token: make sure any stale cache entry is gone.
		tokenCache.Delete(token)
		msg := fmt.Sprintf("Failed to get email by token %s", token)
		return email, &appError{nil, msg, http.StatusForbidden}
	}
	info := new(tokenInfo)
	info.email = email
	info.expireTime = time.Now().Unix() + tokenExpireTime
	info.repoID = repoID
	tokenCache.Store(token, info)
	return email, nil
}

// validateClientVer checks that clientVer is a dotted three-part numeric
// version string ("x.y.z"). It returns http.StatusOK on success and
// http.StatusBadRequest otherwise.
func validateClientVer(clientVer string) int {
	versions := strings.Split(clientVer, ".")
	if len(versions) != 3 {
		return http.StatusBadRequest
	}
	// Each component must parse as an integer (was three copy-pasted
	// strconv.Atoi checks; behavior is unchanged).
	for _, v := range versions {
		if _, err := strconv.Atoi(v); err != nil {
			return http.StatusBadRequest
		}
	}
	return http.StatusOK
}

// getClientIPAddr returns the client IP for the request, preferring the
// first entry of X-Forwarded-For, then X-Real-Ip, and finally the peer
// address from r.RemoteAddr. Returns "" when no valid IP can be parsed.
func getClientIPAddr(r *http.Request) string {
	xForwardedFor := r.Header.Get("X-Forwarded-For")
	addr := strings.TrimSpace(strings.Split(xForwardedFor, ",")[0])
	ip := net.ParseIP(addr)
	if ip != nil {
		return ip.String()
	}
	addr = strings.TrimSpace(r.Header.Get("X-Real-Ip"))
	ip = net.ParseIP(addr)
	if ip != nil {
		return ip.String()
	}
	if addr, _, err := net.SplitHostPort(strings.TrimSpace(r.RemoteAddr)); err == nil {
		ip = net.ParseIP(addr)
		if ip != nil {
			return ip.String()
		}
	}
	return ""
}

// onRepoOper publishes a repo operation event. For a virtual repo the event
// is attributed to the origin repo, carrying the virtual repo's path.
func onRepoOper(eType, repoID, user, ip, clientName string) {
	rData := new(repoEventData)
	vInfo, err := repomgr.GetVirtualRepoInfo(repoID)
	if err != nil {
		log.Errorf("Failed to get virtual repo info by repo id %s: %v", repoID, err)
		return
	}
	if vInfo != nil {
		rData.repoID = vInfo.OriginRepoID
		rData.path = vInfo.Path
	} else {
		rData.repoID = repoID
	}
	rData.eType = eType
	rData.user = user
	rData.ip = ip
	rData.clientName = clientName
	publishRepoEvent(rData)
}

// publishRepoEvent serializes rData and publishes it on the repo event
// channel via the RPC client. Failures are logged, never returned.
func publishRepoEvent(rData *repoEventData) {
	if rData.path == "" {
rData.path = "/" } data := make(map[string]interface{}) data["msg_type"] = rData.eType data["user_name"] = rData.user data["ip"] = rData.ip data["user_agent"] = rData.clientName data["repo_id"] = rData.repoID data["file_path"] = rData.path jsonData, err := json.Marshal(data) if err != nil { log.Warnf("Failed to publish event: %v", err) return } if _, err := rpcclient.Call("publish_event", seafileServerChannelEvent, string(jsonData)); err != nil { log.Warnf("Failed to publish event: %v", err) } } func publishUpdateEvent(repoID string, commitID string) { data := make(map[string]interface{}) data["msg_type"] = "repo-update" data["repo_id"] = repoID data["commit_id"] = commitID jsonData, err := json.Marshal(data) if err != nil { log.Warnf("Failed to publish event: %v", err) return } if _, err := rpcclient.Call("publish_event", seafileServerChannelEvent, string(jsonData)); err != nil { log.Warnf("Failed to publish event: %v", err) } } func removeSyncAPIExpireCache() { deleteTokens := func(key interface{}, value interface{}) bool { if info, ok := value.(*tokenInfo); ok { if info.expireTime <= time.Now().Unix() { tokenCache.Delete(key) } } return true } deletePerms := func(key interface{}, value interface{}) bool { if info, ok := value.(*permInfo); ok { if info.expireTime <= time.Now().Unix() { permCache.Delete(key) } } return true } deleteVirtualRepoInfo := func(key interface{}, value interface{}) bool { if info, ok := value.(*virtualRepoInfo); ok { if info.expireTime <= time.Now().Unix() { virtualRepoInfoCache.Delete(key) } } return true } tokenCache.Range(deleteTokens) permCache.Range(deletePerms) virtualRepoInfoCache.Range(deleteVirtualRepoInfo) } type collectFsInfo struct { startTime int64 isTimeout bool results []interface{} } var ErrTimeout = fmt.Errorf("get fs id list timeout") func calculateSendObjectList(ctx context.Context, repo *repomgr.Repo, serverHead string, clientHead string, dirOnly bool) ([]interface{}, error) { masterHead, err := commitmgr.Load(repo.ID, 
serverHead)
	if err != nil {
		err := fmt.Errorf("Failed to load server head commit %s:%s: %v", repo.ID, serverHead, err)
		return nil, err
	}
	var remoteHead *commitmgr.Commit
	// When the client has no head yet, diff against the empty tree.
	remoteHeadRoot := emptySHA1
	if clientHead != "" {
		remoteHead, err = commitmgr.Load(repo.ID, clientHead)
		if err != nil {
			err := fmt.Errorf("Failed to load remote head commit %s:%s: %v", repo.ID, clientHead, err)
			return nil, err
		}
		remoteHeadRoot = remoteHead.RootID
	}
	info := new(collectFsInfo)
	info.startTime = time.Now().Unix()
	// The server root itself belongs in the answer when it differs from the
	// client root and is not the empty tree.
	if remoteHeadRoot != masterHead.RootID && masterHead.RootID != emptySHA1 {
		info.results = append(info.results, masterHead.RootID)
	}
	var opt *diff.DiffOptions
	if !dirOnly {
		opt = &diff.DiffOptions{
			FileCB: collectFileIDs,
			DirCB:  collectDirIDs,
			Ctx:    ctx,
			RepoID: repo.StoreID}
		opt.Data = info
	} else {
		// dirOnly: install a no-op file callback so only directory ids are
		// collected.
		opt = &diff.DiffOptions{
			FileCB: collectFileIDsNOp,
			DirCB:  collectDirIDs,
			Ctx:    ctx,
			RepoID: repo.StoreID}
		opt.Data = info
	}
	trees := []string{masterHead.RootID, remoteHeadRoot}
	if err := diff.DiffTrees(trees, opt); err != nil {
		if info.isTimeout {
			return nil, ErrTimeout
		}
		return nil, err
	}
	return info.results, nil
}

// collectFileIDs records the id of every file that exists in the server tree
// (files[0]) but is absent from, or differs in, the client tree (files[1]).
func collectFileIDs(ctx context.Context, baseDir string, files []*fsmgr.SeafDirent, data interface{}) error {
	select {
	case <-ctx.Done():
		return context.Canceled
	default:
	}
	file1 := files[0]
	file2 := files[1]
	info, ok := data.(*collectFsInfo)
	if !ok {
		err := fmt.Errorf("failed to assert results")
		return err
	}
	if file1 != nil && (file2 == nil || file1.ID != file2.ID) && file1.ID != emptySHA1 {
		info.results = append(info.results, file1.ID)
	}
	return nil
}

// collectFileIDsNOp is the no-op file callback used in dirOnly mode.
func collectFileIDsNOp(ctx context.Context, baseDir string, files []*fsmgr.SeafDirent, data interface{}) error {
	return nil
}

// collectDirIDs records changed directory ids and aborts the diff with
// ErrTimeout once option.FsIdListRequestTimeout seconds have elapsed.
func collectDirIDs(ctx context.Context, baseDir string, dirs []*fsmgr.SeafDirent, data interface{}, recurse *bool) error {
	select {
	case <-ctx.Done():
		return context.Canceled
	default:
	}
	info, ok := data.(*collectFsInfo)
	if !ok {
		err := fmt.Errorf("failed to assert fs info")
		return err
	}
	dir1 :=
dirs[0] dir2 := dirs[1] if dir1 != nil && (dir2 == nil || dir1.ID != dir2.ID) && dir1.ID != emptySHA1 { info.results = append(info.results, dir1.ID) } if option.FsIdListRequestTimeout > 0 { now := time.Now().Unix() if now-info.startTime > option.FsIdListRequestTimeout { info.isTimeout = true return ErrTimeout } } return nil } ================================================ FILE: fileserver/utils/dup2.go ================================================ //go:build !(linux && arm64) package utils import ( "syscall" ) func Dup(from, to int) error { return syscall.Dup2(from, to) } ================================================ FILE: fileserver/utils/dup3.go ================================================ //go:build linux && arm64 package utils import ( "syscall" ) func Dup(from, to int) error { return syscall.Dup3(from, to, 0) } ================================================ FILE: fileserver/utils/http.go ================================================ package utils import ( "context" "encoding/json" "fmt" "io" "net/http" "strings" "time" ) func GetAuthorizationToken(h http.Header) string { auth := h.Get("Authorization") splitResult := strings.Split(auth, " ") if len(splitResult) > 1 { return splitResult[1] } return "" } func HttpCommon(method, url string, header map[string][]string, reader io.Reader) (int, []byte, error) { header["Content-Type"] = []string{"application/json"} header["User-Agent"] = []string{"Seafile Server"} ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second) defer cancel() req, err := http.NewRequestWithContext(ctx, method, url, reader) if err != nil { return http.StatusInternalServerError, nil, err } req.Header = header rsp, err := http.DefaultClient.Do(req) if err != nil { return http.StatusInternalServerError, nil, err } defer rsp.Body.Close() if rsp.StatusCode != http.StatusOK { errMsg := parseErrorMessage(rsp.Body) return rsp.StatusCode, errMsg, fmt.Errorf("bad response %d for %s", rsp.StatusCode, url) } body, err := 
io.ReadAll(rsp.Body) if err != nil { return rsp.StatusCode, nil, err } return http.StatusOK, body, nil } func parseErrorMessage(r io.Reader) []byte { body, err := io.ReadAll(r) if err != nil { return nil } var objs map[string]string err = json.Unmarshal(body, &objs) if err != nil { return body } errMsg, ok := objs["error_msg"] if ok { return []byte(errMsg) } return body } ================================================ FILE: fileserver/utils/utils.go ================================================ package utils import ( "fmt" "time" jwt "github.com/golang-jwt/jwt/v5" "github.com/google/uuid" "github.com/haiwen/seafile-server/fileserver/option" ) func IsValidUUID(u string) bool { _, err := uuid.Parse(u) return err == nil } func IsObjectIDValid(objID string) bool { if len(objID) != 40 { return false } for i := 0; i < len(objID); i++ { c := objID[i] if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') { continue } return false } return true } type SeahubClaims struct { Exp int64 `json:"exp"` IsInternal bool `json:"is_internal"` jwt.RegisteredClaims } func (*SeahubClaims) Valid() error { return nil } func GenSeahubJWTToken() (string, error) { claims := new(SeahubClaims) claims.Exp = time.Now().Add(time.Second * 300).Unix() claims.IsInternal = true token := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"), claims) tokenString, err := token.SignedString([]byte(option.JWTPrivateKey)) if err != nil { err := fmt.Errorf("failed to gen seahub jwt token: %w", err) return "", err } return tokenString, nil } type MyClaims struct { Exp int64 `json:"exp"` RepoID string `json:"repo_id"` UserName string `json:"username"` jwt.RegisteredClaims } func (*MyClaims) Valid() error { return nil } func GenNotifJWTToken(repoID, user string, exp int64) (string, error) { claims := new(MyClaims) claims.Exp = exp claims.RepoID = repoID claims.UserName = user token := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"), claims) tokenString, err := token.SignedString([]byte(option.JWTPrivateKey)) if 
err != nil { err := fmt.Errorf("failed to gen jwt token for repo %s: %w", repoID, err) return "", err } return tokenString, nil } ================================================ FILE: fileserver/virtual_repo.go ================================================ package main import ( "errors" "fmt" "path/filepath" "strings" "sync" "time" "math/rand" "github.com/haiwen/seafile-server/fileserver/commitmgr" "github.com/haiwen/seafile-server/fileserver/diff" "github.com/haiwen/seafile-server/fileserver/fsmgr" "github.com/haiwen/seafile-server/fileserver/option" "github.com/haiwen/seafile-server/fileserver/repomgr" "github.com/haiwen/seafile-server/fileserver/workerpool" log "github.com/sirupsen/logrus" ) const mergeVirtualRepoWorkerNumber = 5 var mergeVirtualRepoPool *workerpool.WorkPool var runningRepo = make(map[string]struct{}) var runningRepoMutex sync.Mutex func virtualRepoInit() { mergeVirtualRepoPool = workerpool.CreateWorkerPool(mergeVirtualRepo, mergeVirtualRepoWorkerNumber) } func mergeVirtualRepo(args ...interface{}) error { if len(args) < 1 { return nil } repoID := args[0].(string) virtual, err := repomgr.IsVirtualRepo(repoID) if err != nil { return err } if virtual { runningRepoMutex.Lock() if _, ok := runningRepo[repoID]; ok { log.Debugf("a task for repo %s is already running", repoID) go mergeVirtualRepoPool.AddTask(repoID) runningRepoMutex.Unlock() return nil } runningRepo[repoID] = struct{}{} runningRepoMutex.Unlock() err := mergeRepo(repoID) if err != nil { log.Errorf("%v", err) } runningRepoMutex.Lock() delete(runningRepo, repoID) runningRepoMutex.Unlock() go updateSizePool.AddTask(repoID) return nil } excludeRepo := "" if len(args) > 1 { excludeRepo = args[1].(string) } vRepos, _ := repomgr.GetVirtualRepoIDsByOrigin(repoID) for _, id := range vRepos { if id == excludeRepo { continue } runningRepoMutex.Lock() if _, ok := runningRepo[id]; ok { log.Debugf("a task for repo %s is already running", id) go mergeVirtualRepoPool.AddTask(id) 
runningRepoMutex.Unlock() continue } runningRepo[id] = struct{}{} runningRepoMutex.Unlock() err := mergeRepo(id) if err != nil { log.Errorf("%v", err) } runningRepoMutex.Lock() delete(runningRepo, id) runningRepoMutex.Unlock() } go updateSizePool.AddTask(repoID) return nil } func mergeRepo(repoID string) error { repo := repomgr.Get(repoID) if repo == nil { err := fmt.Errorf("failed to get virt repo %.10s", repoID) return err } vInfo := repo.VirtualInfo if vInfo == nil { return nil } origRepo := repomgr.Get(vInfo.OriginRepoID) if origRepo == nil { err := fmt.Errorf("failed to get orig repo %.10s", repoID) return err } head, err := commitmgr.Load(repo.ID, repo.HeadCommitID) if err != nil { err := fmt.Errorf("failed to get commit %s:%.8s", repo.ID, repo.HeadCommitID) return err } origHead, err := commitmgr.Load(origRepo.ID, origRepo.HeadCommitID) if err != nil { err := fmt.Errorf("merge repo %.8s failed: failed to get origin repo commit %s:%.8s", repoID, origRepo.ID, origRepo.HeadCommitID) return err } var origRoot string origRoot, err = fsmgr.GetSeafdirIDByPath(origRepo.StoreID, origHead.RootID, vInfo.Path) if err != nil && !errors.Is(err, fsmgr.ErrPathNoExist) { err := fmt.Errorf("merge repo %.10s failed: failed to get seafdir id by path in origin repo %.10s: %v", repoID, origRepo.StoreID, err) return err } if origRoot == "" { newPath, _ := handleMissingVirtualRepo(origRepo, origHead, vInfo) if newPath != "" { origRoot, _ = fsmgr.GetSeafdirIDByPath(origRepo.StoreID, origHead.RootID, newPath) } if origRoot == "" { return nil } } base, err := commitmgr.Load(origRepo.ID, vInfo.BaseCommitID) if err != nil { err := fmt.Errorf("merge repo %.8s failed: failed to get origin repo commit %s:%.8s", repoID, origRepo.ID, vInfo.BaseCommitID) return err } root := head.RootID baseRoot, _ := fsmgr.GetSeafdirIDByPath(origRepo.StoreID, base.RootID, vInfo.Path) if baseRoot == "" { err := fmt.Errorf("merge repo %.10s failed: cannot find seafdir for origin repo %.10s path %s", repoID, 
vInfo.OriginRepoID, vInfo.Path) return err } if root == origRoot { } else if baseRoot == root { _, err := updateDir(repoID, "/", origRoot, origHead.CreatorName, head.CommitID) if err != nil { err := fmt.Errorf("failed to update root of virtual repo %.10s", repoID) return err } repomgr.SetVirtualRepoBaseCommitPath(repo.ID, origRepo.HeadCommitID, vInfo.Path) } else if baseRoot == origRoot { newBaseCommit, err := updateDir(vInfo.OriginRepoID, vInfo.Path, root, head.CreatorName, origHead.CommitID) if err != nil { err := fmt.Errorf("merge repo %.8s failed: failed to update origin repo%.10s path %s", repoID, vInfo.OriginRepoID, vInfo.Path) return err } repomgr.SetVirtualRepoBaseCommitPath(repo.ID, newBaseCommit, vInfo.Path) cleanupVirtualRepos(vInfo.OriginRepoID) mergeVirtualRepo(vInfo.OriginRepoID, repoID) } else { roots := []string{baseRoot, origRoot, root} opt := new(mergeOptions) opt.remoteRepoID = repoID opt.remoteHead = head.CommitID err := mergeTrees(origRepo.StoreID, roots, opt) if err != nil { err := fmt.Errorf("failed to merge") return err } _, err = updateDir(repoID, "/", opt.mergedRoot, origHead.CreatorName, head.CommitID) if err != nil { err := fmt.Errorf("failed to update root of virtual repo %.10s", repoID) return err } newBaseCommit, err := updateDir(vInfo.OriginRepoID, vInfo.Path, opt.mergedRoot, head.CreatorName, origHead.CommitID) if err != nil { err := fmt.Errorf("merge repo %.10s failed: failed to update origin repo %.10s path %s", repoID, vInfo.OriginRepoID, vInfo.Path) return err } repomgr.SetVirtualRepoBaseCommitPath(repo.ID, newBaseCommit, vInfo.Path) cleanupVirtualRepos(vInfo.OriginRepoID) mergeVirtualRepo(vInfo.OriginRepoID, repoID) } return nil } func cleanupVirtualRepos(repoID string) error { repo := repomgr.Get(repoID) if repo == nil { err := fmt.Errorf("failed to get repo %.10s", repoID) return err } head, err := commitmgr.Load(repo.ID, repo.HeadCommitID) if err != nil { err := fmt.Errorf("failed to load commit %s/%s : %v", repo.ID, 
repo.HeadCommitID, err) return err } vRepos, err := repomgr.GetVirtualRepoInfoByOrigin(repoID) if err != nil { err := fmt.Errorf("failed to get virtual repo ids by origin repo %.10s", repoID) return err } for _, vInfo := range vRepos { _, err := fsmgr.GetSeafdirByPath(repo.StoreID, head.RootID, vInfo.Path) if err != nil { if err == fsmgr.ErrPathNoExist { handleMissingVirtualRepo(repo, head, vInfo) } } } return nil } func handleMissingVirtualRepo(repo *repomgr.Repo, head *commitmgr.Commit, vInfo *repomgr.VRepoInfo) (string, error) { parent, err := commitmgr.Load(head.RepoID, head.ParentID.String) if err != nil { err := fmt.Errorf("failed to load commit %s/%s : %v", head.RepoID, head.ParentID.String, err) return "", err } var results []*diff.DiffEntry err = diff.DiffCommits(parent, head, &results, true) if err != nil { err := fmt.Errorf("failed to diff commits") return "", err } parPath := vInfo.Path var isRenamed bool var subPath string var returnPath string for { var newPath string oldDirID, err := fsmgr.GetSeafdirIDByPath(repo.StoreID, parent.RootID, parPath) if err != nil || oldDirID == "" { if err == fsmgr.ErrPathNoExist { repomgr.DelVirtualRepo(vInfo.RepoID, option.CloudMode) } err := fmt.Errorf("failed to find %s under commit %s in repo %s", parPath, parent.CommitID, repo.StoreID) return "", err } for _, de := range results { if de.Status == diff.DiffStatusDirRenamed { if de.Sha1 == oldDirID { if subPath != "" { newPath = filepath.Join("/", de.NewName, subPath) } else { newPath = filepath.Join("/", de.NewName) } repomgr.SetVirtualRepoBaseCommitPath(vInfo.RepoID, head.CommitID, newPath) returnPath = newPath if subPath == "" { newName := filepath.Base(newPath) err := editRepo(vInfo.RepoID, newName, "Changed library name", "") if err != nil { log.Warnf("falied to rename repo %s.\n", newName) } } isRenamed = true break } } } if isRenamed { break } slash := strings.LastIndex(parPath, "/") if slash <= 0 { break } subPath = filepath.Base(parPath) parPath = 
filepath.Dir(parPath) } if !isRenamed { repomgr.DelVirtualRepo(vInfo.RepoID, option.CloudMode) } return returnPath, nil } func editRepo(repoID, name, desc, user string) error { if name == "" && desc == "" { err := fmt.Errorf("at least one argument should be non-null") return err } var retryCnt int for retry, err := editRepoNeedRetry(repoID, name, desc, user); err != nil || retry; { if err != nil { err := fmt.Errorf("failed to edit repo: %v", err) return err } if retryCnt < 3 { random := rand.Intn(10) + 1 time.Sleep(time.Duration(random*100) * time.Millisecond) retryCnt++ } else { err := fmt.Errorf("stop edit repo %s after 3 retries", repoID) return err } } return nil } func editRepoNeedRetry(repoID, name, desc, user string) (bool, error) { repo := repomgr.Get(repoID) if repo == nil { err := fmt.Errorf("no such library") return false, err } if name == "" { name = repo.Name } if desc == "" { desc = repo.Desc } parent, err := commitmgr.Load(repo.ID, repo.HeadCommitID) if err != nil { err := fmt.Errorf("failed to get commit %s:%s", repo.ID, repo.HeadCommitID) return false, err } if user == "" { user = parent.CreatorName } commit := commitmgr.NewCommit(repoID, parent.CommitID, parent.RootID, user, "Changed library name or description") repomgr.RepoToCommit(repo, commit) commit.RepoName = name commit.RepoDesc = desc err = commitmgr.Save(commit) if err != nil { err := fmt.Errorf("failed to add commit: %v", err) return false, err } _, err = updateBranch(repoID, repo.StoreID, commit.CommitID, parent.CommitID, "", false, "") if err != nil { return true, nil } repomgr.UpdateRepoInfo(repoID, commit.CommitID) return true, nil } ================================================ FILE: fileserver/workerpool/workerpool.go ================================================ package workerpool import ( "runtime/debug" "github.com/dgraph-io/ristretto/z" log "github.com/sirupsen/logrus" ) type WorkPool struct { jobs chan Job jobCB JobCB closer *z.Closer } // Job is the job object of 
workpool. type Job struct { callback JobCB args []interface{} } type JobCB func(args ...interface{}) error func CreateWorkerPool(jobCB JobCB, n int) *WorkPool { pool := new(WorkPool) pool.jobCB = jobCB pool.jobs = make(chan Job, 100) pool.closer = z.NewCloser(n) for i := 0; i < n; i++ { go pool.run(pool.jobs) } return pool } func (pool *WorkPool) AddTask(args ...interface{}) { job := Job{pool.jobCB, args} pool.jobs <- job } func (pool *WorkPool) run(jobs chan Job) { defer func() { if err := recover(); err != nil { log.Errorf("panic: %v\n%s", err, debug.Stack()) } }() defer pool.closer.Done() for { select { case job := <-pool.jobs: if job.callback != nil { err := job.callback(job.args...) if err != nil { log.Errorf("failed to call jobs: %v.\n", err) } } case <-pool.closer.HasBeenClosed(): return } } } func (pool *WorkPool) Shutdown() { pool.closer.SignalAndWait() } ================================================ FILE: fuse/Makefile.am ================================================ AM_CFLAGS = -DPKGDATADIR=\"$(pkgdatadir)\" \ -DPACKAGE_DATA_DIR=\""$(pkgdatadir)"\" \ -DSEAFILE_SERVER \ -I$(top_srcdir)/include \ -I$(top_srcdir)/lib \ -I$(top_builddir)/lib \ -I$(top_srcdir)/common \ @SEARPC_CFLAGS@ \ @GLIB2_CFLAGS@ \ @FUSE_CFLAGS@ \ @MYSQL_CFLAGS@ \ -Wall bin_PROGRAMS = seaf-fuse noinst_HEADERS = seaf-fuse.h seafile-session.h repo-mgr.h seaf_fuse_SOURCES = seaf-fuse.c \ seafile-session.c \ file.c \ getattr.c \ readdir.c \ repo-mgr.c \ ../common/block-mgr.c \ ../common/user-mgr.c \ ../common/group-mgr.c \ ../common/org-mgr.c \ ../common/block-backend.c \ ../common/block-backend-fs.c \ ../common/branch-mgr.c \ ../common/commit-mgr.c \ ../common/fs-mgr.c \ ../common/log.c \ ../common/seaf-db.c \ ../common/seaf-utils.c \ ../common/obj-store.c \ ../common/obj-backend-fs.c \ ../common/obj-backend-riak.c \ ../common/seafile-crypt.c \ ../common/password-hash.c seaf_fuse_LDADD = @GLIB2_LIBS@ @GOBJECT_LIBS@ @SSL_LIBS@ @LIB_RT@ @LIB_UUID@ \ -lsqlite3 @LIBEVENT_LIBS@ \ 
$(top_builddir)/common/cdc/libcdc.la \ @SEARPC_LIBS@ @JANSSON_LIBS@ @FUSE_LIBS@ @ZLIB_LIBS@ \ @MYSQL_LIBS@ -lsqlite3 @ARGON2_LIBS@ ================================================ FILE: fuse/file.c ================================================ #include "common.h" #define FUSE_USE_VERSION 26 #include #include #include #include #include "log.h" #include "utils.h" #include "seaf-fuse.h" int read_file(SeafileSession *seaf, const char *store_id, int version, Seafile *file, char *buf, size_t size, off_t offset, struct fuse_file_info *info) { BlockHandle *handle = NULL;; BlockMetadata *bmd; char *blkid; char *ptr; off_t off = 0, nleft; int i, n, ret = -EIO; for (i = 0; i < file->n_blocks; i++) { blkid = file->blk_sha1s[i]; bmd = seaf_block_manager_stat_block(seaf->block_mgr, store_id, version, blkid); if (!bmd) return -EIO; if (offset < off + bmd->size) { g_free (bmd); break; } off += bmd->size; g_free (bmd); } /* beyond the file size */ if (i == file->n_blocks) return 0; nleft = size; ptr = buf; while (nleft > 0 && i < file->n_blocks) { blkid = file->blk_sha1s[i]; handle = seaf_block_manager_open_block(seaf->block_mgr, store_id, version, blkid, BLOCK_READ); if (!handle) { seaf_warning ("Failed to open block %s:%s.\n", store_id, blkid); return -EIO; } /* trim the offset in a block */ if (offset > off) { char *tmp = (char *)malloc(sizeof(char) * (offset - off)); if (!tmp) return -ENOMEM; n = seaf_block_manager_read_block(seaf->block_mgr, handle, tmp, offset-off); if (n != offset - off) { seaf_warning ("Failed to read block %s:%s.\n", store_id, blkid); free (tmp); goto out; } off += n; free(tmp); } if ((n = seaf_block_manager_read_block(seaf->block_mgr, handle, ptr, nleft)) < 0) { seaf_warning ("Failed to read block %s:%s.\n", store_id, blkid); goto out; } nleft -= n; ptr += n; off += n; ++i; /* At this point we should have read all the content of the block or * have read up to @size bytes. So it's safe to close the block. 
*/ seaf_block_manager_close_block(seaf->block_mgr, handle); seaf_block_manager_block_handle_free (seaf->block_mgr, handle); } return size - nleft; out: if (handle) { seaf_block_manager_close_block(seaf->block_mgr, handle); seaf_block_manager_block_handle_free (seaf->block_mgr, handle); } return ret; } ================================================ FILE: fuse/getattr.c ================================================ #include "common.h" #define FUSE_USE_VERSION 26 #include #include #include #include #include "log.h" #include "utils.h" #include "seaf-fuse.h" #include "seafile-session.h" #include "seaf-utils.h" static CcnetEmailUser *get_user_from_ccnet (SearpcClient *client, const char *user) { return (CcnetEmailUser *)searpc_client_call__object (client, "get_emailuser", CCNET_TYPE_EMAIL_USER, NULL, 1, "string", user); } static int getattr_root(SeafileSession *seaf, struct stat *stbuf) { stbuf->st_mode = S_IFDIR | 0755; stbuf->st_nlink = 2; stbuf->st_size = 4096; return 0; } static int getattr_user(SeafileSession *seaf, const char *user, struct stat *stbuf) { CcnetEmailUser *emailuser; emailuser = ccnet_user_manager_get_emailuser (seaf->user_mgr, user, NULL); if (!emailuser) { return -ENOENT; } g_object_unref (emailuser); stbuf->st_mode = S_IFDIR | 0755; stbuf->st_nlink = 2; stbuf->st_size = 4096; return 0; } static int getattr_repo(SeafileSession *seaf, const char *user, const char *repo_id, const char *repo_path, struct stat *stbuf) { SeafRepo *repo = NULL; SeafBranch *branch; SeafCommit *commit = NULL; guint32 mode = 0; char *id = NULL; int ret = 0; repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id); if (!repo) { seaf_warning ("Failed to get repo %s.\n", repo_id); ret = -ENOENT; goto out; } branch = repo->head; commit = seaf_commit_manager_get_commit(seaf->commit_mgr, repo->id, repo->version, branch->commit_id); if (!commit) { seaf_warning ("Failed to get commit %s:%.8s.\n", repo->id, branch->commit_id); ret = -ENOENT; goto out; } id = 
seaf_fs_manager_path_to_obj_id(seaf->fs_mgr, repo->store_id, repo->version, commit->root_id, repo_path, &mode, NULL); if (!id) { seaf_warning ("Path %s doesn't exist in repo %s.\n", repo_path, repo_id); ret = -ENOENT; goto out; } if (S_ISDIR(mode)) { SeafDir *dir; GList *l; int cnt = 2; /* '.' and '..' */ dir = seaf_fs_manager_get_seafdir(seaf->fs_mgr, repo->store_id, repo->version, id); if (dir) { for (l = dir->entries; l; l = l->next) cnt++; } if (strcmp (repo_path, "/") != 0) { // get dirent of the dir SeafDirent *dirent = seaf_fs_manager_get_dirent_by_path (seaf->fs_mgr, repo->store_id, repo->version, commit->root_id, repo_path, NULL); if (dirent && repo->version != 0) stbuf->st_mtime = dirent->mtime; seaf_dirent_free (dirent); } stbuf->st_size += cnt * sizeof(SeafDirent); stbuf->st_mode = mode | 0755; stbuf->st_nlink = 2; seaf_dir_free (dir); } else if (S_ISREG(mode)) { Seafile *file; file = seaf_fs_manager_get_seafile(seaf->fs_mgr, repo->store_id, repo->version, id); if (file) stbuf->st_size = file->file_size; SeafDirent *dirent = seaf_fs_manager_get_dirent_by_path (seaf->fs_mgr, repo->store_id, repo->version, commit->root_id, repo_path, NULL); if (dirent && repo->version != 0) stbuf->st_mtime = dirent->mtime; stbuf->st_mode = mode | 0644; stbuf->st_nlink = 1; seaf_dirent_free (dirent); seafile_unref (file); } else { return -ENOENT; } out: g_free (id); seaf_repo_unref (repo); seaf_commit_unref (commit); return ret; } int do_getattr(SeafileSession *seaf, const char *path, struct stat *stbuf) { int n_parts; char *user, *repo_id, *repo_path; int ret = 0; if (parse_fuse_path (path, &n_parts, &user, &repo_id, &repo_path) < 0) { return -ENOENT; } switch (n_parts) { case 0: ret = getattr_root(seaf, stbuf); break; case 1: ret = getattr_user(seaf, user, stbuf); break; case 2: case 3: ret = getattr_repo(seaf, user, repo_id, repo_path, stbuf); break; } g_free (user); g_free (repo_id); g_free (repo_path); return ret; } ================================================ 
FILE: fuse/readdir.c ================================================ #include "common.h" #define FUSE_USE_VERSION 26 #include #include #include #include #include "log.h" #include "utils.h" #include "seaf-fuse.h" #include "seafile-session.h" #include "seaf-utils.h" static char *replace_slash (const char *repo_name) { char *ret = g_strdup(repo_name); char *p; for (p = ret; *p != 0; ++p) if (*p == '/') *p = '_'; return ret; } static GList *get_users_from_ccnet (SearpcClient *client, const char *source) { return searpc_client_call__objlist (client, "get_emailusers", CCNET_TYPE_EMAIL_USER, NULL, 3, "string", source, "int", -1, "int", -1); } static CcnetEmailUser *get_user_from_ccnet (SearpcClient *client, const char *user) { return (CcnetEmailUser *)searpc_client_call__object (client, "get_emailuser", CCNET_TYPE_EMAIL_USER, NULL, 1, "string", user); } static int readdir_root(SeafileSession *seaf, void *buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info *info) { GList *users, *p; CcnetEmailUser *user; const char *email; GHashTable *user_hash; int dummy; user_hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); users = ccnet_user_manager_get_emailusers (seaf->user_mgr, "DB", -1, -1, NULL); for (p = users; p; p = p->next) { user = p->data; email = ccnet_email_user_get_email (user); g_hash_table_insert (user_hash, g_strdup(email), &dummy); g_object_unref (user); } g_list_free (users); users = ccnet_user_manager_get_emailusers (seaf->user_mgr, "LDAPImport", -1, -1, NULL); for (p = users; p; p = p->next) { user = p->data; email = ccnet_email_user_get_email (user); g_hash_table_insert (user_hash, g_strdup(email), &dummy); g_object_unref (user); } g_list_free (users); users = g_hash_table_get_keys (user_hash); for (p = users; p; p = p->next) { email = p->data; char *exclude = g_hash_table_lookup (seaf->excluded_users, email); if (exclude) continue; filler (buf, email, NULL, 0); } g_list_free (users); g_hash_table_destroy (user_hash); return 0; } 
static int readdir_user(SeafileSession *seaf, const char *user, void *buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info *info) { CcnetEmailUser *emailuser; GList *list = NULL, *p; GString *name; emailuser = ccnet_user_manager_get_emailuser (seaf->user_mgr, user, NULL); if (!emailuser) { return -ENOENT; } g_object_unref (emailuser); list = seaf_repo_manager_get_repos_by_owner (seaf->repo_mgr, user); if (!list) { return 0; } for (p = list; p; p = p->next) { SeafRepo *repo = (SeafRepo *)p->data; /* Don't list virtual repos. */ if (seaf_repo_manager_is_virtual_repo(seaf->repo_mgr, repo->id)) { seaf_repo_unref (repo); continue; } // Don't list encrypted repo if (repo->encrypted) { continue; } char *clean_repo_name = replace_slash (repo->name); name = g_string_new (""); g_string_printf (name, "%s_%s", repo->id, clean_repo_name); filler(buf, name->str, NULL, 0); g_string_free (name, TRUE); g_free (clean_repo_name); seaf_repo_unref (repo); } g_list_free (list); return 0; } static int readdir_repo(SeafileSession *seaf, const char *user, const char *repo_id, const char *repo_path, void *buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info *info) { SeafRepo *repo = NULL; SeafBranch *branch; SeafCommit *commit = NULL; SeafDir *dir = NULL; GList *l; int ret = 0; repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id); if (!repo) { seaf_warning ("Failed to get repo %s.\n", repo_id); ret = -ENOENT; goto out; } branch = repo->head; commit = seaf_commit_manager_get_commit(seaf->commit_mgr, repo->id, repo->version, branch->commit_id); if (!commit) { seaf_warning ("Failed to get commit %s:%.8s.\n", repo->id, branch->commit_id); ret = -ENOENT; goto out; } dir = seaf_fs_manager_get_seafdir_by_path(seaf->fs_mgr, repo->store_id, repo->version, commit->root_id, repo_path, NULL); if (!dir) { seaf_warning ("Path %s doesn't exist in repo %s.\n", repo_path, repo_id); ret = -ENOENT; goto out; } for (l = dir->entries; l; l = l->next) { SeafDirent *seaf_dent = 
(SeafDirent *) l->data; /* FIXME: maybe we need to return stbuf */ filler(buf, seaf_dent->name, NULL, 0); } out: seaf_repo_unref (repo); seaf_commit_unref (commit); seaf_dir_free (dir); return ret; } int do_readdir(SeafileSession *seaf, const char *path, void *buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info *info) { int n_parts; char *user, *repo_id, *repo_path; int ret = 0; if (parse_fuse_path (path, &n_parts, &user, &repo_id, &repo_path) < 0) { return -ENOENT; } switch (n_parts) { case 0: ret = readdir_root(seaf, buf, filler, offset, info); break; case 1: ret = readdir_user(seaf, user, buf, filler, offset, info); break; case 2: case 3: ret = readdir_repo(seaf, user, repo_id, repo_path, buf, filler, offset, info); break; } g_free (user); g_free (repo_id); g_free (repo_path); return ret; } ================================================ FILE: fuse/repo-mgr.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include "common.h" #include #include "utils.h" #include "log.h" #include "seafile-session.h" #include "commit-mgr.h" #include "branch-mgr.h" #include "repo-mgr.h" #include "fs-mgr.h" #include "seafile-error.h" #include "seaf-db.h" #define INDEX_DIR "index" struct _SeafRepoManagerPriv { }; static SeafRepo * load_repo (SeafRepoManager *manager, const char *repo_id); gboolean is_repo_id_valid (const char *id) { if (!id) return FALSE; return is_uuid_valid (id); } SeafRepo* seaf_repo_new (const char *id, const char *name, const char *desc) { SeafRepo* repo; /* valid check */ repo = g_new0 (SeafRepo, 1); memcpy (repo->id, id, 36); repo->id[36] = '\0'; repo->name = g_strdup(name); repo->desc = g_strdup(desc); repo->ref_cnt = 1; return repo; } void seaf_repo_free (SeafRepo *repo) { if (repo->name) g_free (repo->name); if (repo->desc) g_free (repo->desc); if (repo->category) g_free (repo->category); if (repo->head) seaf_branch_unref (repo->head); g_free (repo); } void 
seaf_repo_ref (SeafRepo *repo) { g_atomic_int_inc (&repo->ref_cnt); } void seaf_repo_unref (SeafRepo *repo) { if (!repo) return; if (g_atomic_int_dec_and_test (&repo->ref_cnt)) seaf_repo_free (repo); } static void set_head_common (SeafRepo *repo, SeafBranch *branch) { if (repo->head) seaf_branch_unref (repo->head); repo->head = branch; seaf_branch_ref(branch); } void seaf_repo_from_commit (SeafRepo *repo, SeafCommit *commit) { repo->name = g_strdup (commit->repo_name); repo->desc = g_strdup (commit->repo_desc); repo->encrypted = commit->encrypted; repo->no_local_history = commit->no_local_history; repo->version = commit->version; } void seaf_repo_to_commit (SeafRepo *repo, SeafCommit *commit) { commit->repo_name = g_strdup (repo->name); commit->repo_desc = g_strdup (repo->desc); commit->encrypted = repo->encrypted; commit->no_local_history = repo->no_local_history; commit->version = repo->version; } static gboolean collect_commit (SeafCommit *commit, void *vlist, gboolean *stop) { GList **commits = vlist; /* The traverse function will unref the commit, so we need to ref it. 
*/ seaf_commit_ref (commit); *commits = g_list_prepend (*commits, commit); return TRUE; } GList * seaf_repo_get_commits (SeafRepo *repo) { GList *branches; GList *ptr; SeafBranch *branch; GList *commits = NULL; branches = seaf_branch_manager_get_branch_list (seaf->branch_mgr, repo->id); if (branches == NULL) { seaf_warning ("Failed to get branch list of repo %s.\n", repo->id); return NULL; } for (ptr = branches; ptr != NULL; ptr = ptr->next) { branch = ptr->data; gboolean res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr, repo->id, repo->version, branch->commit_id, collect_commit, &commits, FALSE); if (!res) { for (ptr = commits; ptr != NULL; ptr = ptr->next) seaf_commit_unref ((SeafCommit *)(ptr->data)); g_list_free (commits); goto out; } } commits = g_list_reverse (commits); out: for (ptr = branches; ptr != NULL; ptr = ptr->next) { seaf_branch_unref ((SeafBranch *)ptr->data); } return commits; } #if 0 static int compare_repo (const SeafRepo *srepo, const SeafRepo *trepo) { return g_strcmp0 (srepo->id, trepo->id); } #endif SeafRepoManager* seaf_repo_manager_new (SeafileSession *seaf) { SeafRepoManager *mgr = g_new0 (SeafRepoManager, 1); mgr->priv = g_new0 (SeafRepoManagerPriv, 1); mgr->seaf = seaf; return mgr; } int seaf_repo_manager_init (SeafRepoManager *mgr) { return 0; } int seaf_repo_manager_start (SeafRepoManager *mgr) { return 0; } static gboolean repo_exists_in_db (SeafDB *db, const char *id) { char sql[256]; gboolean db_err = FALSE; snprintf (sql, sizeof(sql), "SELECT repo_id FROM Repo WHERE repo_id = '%s'", id); return seaf_db_check_for_existence (db, sql, &db_err); } SeafRepo* seaf_repo_manager_get_repo (SeafRepoManager *manager, const gchar *id) { SeafRepo repo; int len = strlen(id); if (len >= 37) return NULL; memcpy (repo.id, id, len + 1); if (repo_exists_in_db (manager->seaf->db, id)) { SeafRepo *ret = load_repo (manager, id); if (!ret) return NULL; /* seaf_repo_ref (ret); */ return ret; } return NULL; } gboolean 
seaf_repo_manager_repo_exists (SeafRepoManager *manager, const gchar *id) { SeafRepo repo; memcpy (repo.id, id, 37); return repo_exists_in_db (manager->seaf->db, id); } static void load_repo_commit (SeafRepoManager *manager, SeafRepo *repo, SeafBranch *branch) { SeafCommit *commit; commit = seaf_commit_manager_get_commit_compatible (manager->seaf->commit_mgr, repo->id, branch->commit_id); if (!commit) { seaf_warning ("Commit %s is missing\n", branch->commit_id); repo->is_corrupted = TRUE; return; } set_head_common (repo, branch); seaf_repo_from_commit (repo, commit); seaf_commit_unref (commit); } static gboolean load_virtual_info (SeafDBRow *row, void *vrepo_id) { char *ret_repo_id = vrepo_id; const char *origin_repo_id; origin_repo_id = seaf_db_row_get_column_text (row, 0); memcpy (ret_repo_id, origin_repo_id, 37); return FALSE; } char * get_origin_repo_id (SeafRepoManager *mgr, const char *repo_id) { char sql[256]; char origin_repo_id[37]; memset (origin_repo_id, 0, 37); snprintf (sql, 256, "SELECT origin_repo FROM VirtualRepo " "WHERE repo_id = '%s'", repo_id); seaf_db_foreach_selected_row (seaf->db, sql, load_virtual_info, origin_repo_id); if (origin_repo_id[0] != 0) return g_strdup(origin_repo_id); else return NULL; } static SeafRepo * load_repo (SeafRepoManager *manager, const char *repo_id) { SeafRepo *repo; SeafBranch *branch; repo = seaf_repo_new(repo_id, NULL, NULL); if (!repo) { seaf_warning ("[repo mgr] failed to alloc repo.\n"); return NULL; } repo->manager = manager; branch = seaf_branch_manager_get_branch (seaf->branch_mgr, repo_id, "master"); if (!branch) { seaf_warning ("Failed to get master branch of repo %.8s.\n", repo_id); repo->is_corrupted = TRUE; } else { load_repo_commit (manager, repo, branch); seaf_branch_unref (branch); } if (repo->is_corrupted) { seaf_warning ("Repo %.8s is corrupted.\n", repo->id); seaf_repo_free (repo); return NULL; } char *origin_repo_id = get_origin_repo_id (manager, repo->id); if (origin_repo_id) memcpy 
(repo->store_id, origin_repo_id, 36); else memcpy (repo->store_id, repo->id, 36); g_free (origin_repo_id); return repo; } static gboolean collect_repo_id (SeafDBRow *row, void *data) { GList **p_ids = data; const char *repo_id; repo_id = seaf_db_row_get_column_text (row, 0); *p_ids = g_list_prepend (*p_ids, g_strdup(repo_id)); return TRUE; } GList * seaf_repo_manager_get_repo_id_list (SeafRepoManager *mgr) { GList *ret = NULL; char sql[256]; snprintf (sql, 256, "SELECT repo_id FROM Repo"); if (seaf_db_foreach_selected_row (mgr->seaf->db, sql, collect_repo_id, &ret) < 0) return NULL; return ret; } GList * seaf_repo_manager_get_repo_list (SeafRepoManager *mgr, int start, int limit) { GList *id_list = NULL, *ptr; GList *ret = NULL; SeafRepo *repo; char sql[256]; if (start == -1 && limit == -1) snprintf (sql, 256, "SELECT repo_id FROM Repo"); else snprintf (sql, 256, "SELECT repo_id FROM Repo LIMIT %d, %d", start, limit); if (seaf_db_foreach_selected_row (mgr->seaf->db, sql, collect_repo_id, &id_list) < 0) return NULL; for (ptr = id_list; ptr; ptr = ptr->next) { char *repo_id = ptr->data; repo = seaf_repo_manager_get_repo (mgr, repo_id); if (repo != NULL) ret = g_list_prepend (ret, repo); } string_list_free (id_list); return g_list_reverse (ret); } GList * seaf_repo_manager_get_repos_by_owner (SeafRepoManager *mgr, const char *email) { GList *id_list = NULL, *ptr; GList *ret = NULL; char sql[256]; snprintf (sql, 256, "SELECT repo_id FROM RepoOwner WHERE owner_id='%s'", email); if (seaf_db_foreach_selected_row (mgr->seaf->db, sql, collect_repo_id, &id_list) < 0) return NULL; for (ptr = id_list; ptr; ptr = ptr->next) { char *repo_id = ptr->data; SeafRepo *repo = seaf_repo_manager_get_repo (mgr, repo_id); if (repo != NULL) ret = g_list_prepend (ret, repo); } string_list_free (id_list); return ret; } gboolean seaf_repo_manager_is_virtual_repo (SeafRepoManager *mgr, const char *repo_id) { char sql[256]; gboolean db_err; snprintf (sql, 256, "SELECT 1 FROM VirtualRepo WHERE 
repo_id = '%s'", repo_id); return seaf_db_check_for_existence (seaf->db, sql, &db_err); } ================================================ FILE: fuse/repo-mgr.h ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef SEAF_REPO_MGR_H #define SEAF_REPO_MGR_H #include #include "seafile-object.h" #include "commit-mgr.h" #include "branch-mgr.h" struct _SeafRepoManager; typedef struct _SeafRepo SeafRepo; struct _SeafRepo { struct _SeafRepoManager *manager; gchar id[37]; gchar *name; gchar *desc; gchar *category; /* not used yet */ gboolean encrypted; int enc_version; gchar magic[33]; /* hash(repo_id + passwd), key stretched. */ gboolean no_local_history; SeafBranch *head; gboolean is_corrupted; gboolean delete_pending; int ref_cnt; int version; /* Used to access fs and block sotre. * This id is different from repo_id when this repo is virtual. * Virtual repos share fs and block store with its origin repo. * However, commit store for each repo is always independent. * So always use repo_id to access commit store. 
*/ gchar store_id[37]; }; gboolean is_repo_id_valid (const char *id); SeafRepo* seaf_repo_new (const char *id, const char *name, const char *desc); void seaf_repo_free (SeafRepo *repo); void seaf_repo_ref (SeafRepo *repo); void seaf_repo_unref (SeafRepo *repo); typedef struct _SeafRepoManager SeafRepoManager; typedef struct _SeafRepoManagerPriv SeafRepoManagerPriv; struct _SeafRepoManager { struct _SeafileSession *seaf; SeafRepoManagerPriv *priv; }; SeafRepoManager* seaf_repo_manager_new (struct _SeafileSession *seaf); int seaf_repo_manager_init (SeafRepoManager *mgr); int seaf_repo_manager_start (SeafRepoManager *mgr); int seaf_repo_manager_add_repo (SeafRepoManager *mgr, SeafRepo *repo); int seaf_repo_manager_del_repo (SeafRepoManager *mgr, SeafRepo *repo); SeafRepo* seaf_repo_manager_get_repo (SeafRepoManager *manager, const gchar *id); gboolean seaf_repo_manager_repo_exists (SeafRepoManager *manager, const gchar *id); GList* seaf_repo_manager_get_repo_list (SeafRepoManager *mgr, int start, int limit); GList * seaf_repo_manager_get_repo_id_list (SeafRepoManager *mgr); GList * seaf_repo_manager_get_repos_by_owner (SeafRepoManager *mgr, const char *email); gboolean seaf_repo_manager_is_virtual_repo (SeafRepoManager *mgr, const char *repo_id); #endif ================================================ FILE: fuse/seaf-fuse.c ================================================ #include "common.h" #include #include #define FUSE_USE_VERSION 26 #include #include #include #include #include #include "log.h" #include "utils.h" #include "seaf-fuse.h" SeafileSession *seaf = NULL; static char *parse_repo_id (const char *repo_id_name) { if (strlen(repo_id_name) < 36) return NULL; return g_strndup(repo_id_name, 36); } /* * Path format can be: * 1. / --> list all users * 2. /user --> list libraries owned by user * 3. /user/repo-id_name --> list root of the library * 4. 
/user/repo-id_name/repo_path --> list library content */ int parse_fuse_path (const char *path, int *n_parts, char **user, char **repo_id, char **repo_path) { char **tokens; int n; int ret = 0; *user = NULL; *repo_id = NULL; *repo_path = NULL; if (*path == '/') ++path; tokens = g_strsplit (path, "/", 3); n = g_strv_length (tokens); *n_parts = n; switch (n) { case 0: break; case 1: *user = g_strdup(tokens[0]); break; case 2: *repo_id = parse_repo_id(tokens[1]); if (*repo_id == NULL) { ret = -1; break; } *user = g_strdup(tokens[0]); *repo_path = g_strdup("/"); break; case 3: *repo_id = parse_repo_id(tokens[1]); if (*repo_id == NULL) { ret = -1; break; } *user = g_strdup(tokens[0]); *repo_path = g_strdup(tokens[2]); break; } g_strfreev (tokens); return ret; } static int seaf_fuse_getattr(const char *path, struct stat *stbuf) { memset(stbuf, 0, sizeof(struct stat)); return do_getattr(seaf, path, stbuf); } static int seaf_fuse_readdir(const char *path, void *buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info *info) { filler(buf, ".", NULL, 0); filler(buf, "..", NULL, 0); return do_readdir(seaf, path, buf, filler, offset, info); } static int seaf_fuse_open(const char *path, struct fuse_file_info *info) { int n_parts; char *user, *repo_id, *repo_path; SeafRepo *repo = NULL; SeafBranch *branch = NULL; SeafCommit *commit = NULL; guint32 mode = 0; int ret = 0; /* Now we only support read-only mode */ if ((info->flags & 3) != O_RDONLY) return -EACCES; if (parse_fuse_path (path, &n_parts, &user, &repo_id, &repo_path) < 0) { seaf_warning ("Invalid input path %s.\n", path); return -ENOENT; } if (n_parts != 2 && n_parts != 3) { seaf_warning ("Invalid input path for open: %s.\n", path); ret = -EACCES; goto out; } repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id); if (!repo) { seaf_warning ("Failed to get repo %s.\n", repo_id); ret = -ENOENT; goto out; } branch = repo->head; commit = seaf_commit_manager_get_commit(seaf->commit_mgr, repo->id, repo->version, 
branch->commit_id); if (!commit) { seaf_warning ("Failed to get commit %s:%.8s.\n", repo->id, branch->commit_id); ret = -ENOENT; goto out; } char *id = seaf_fs_manager_path_to_obj_id(seaf->fs_mgr, repo->store_id, repo->version, commit->root_id, repo_path, &mode, NULL); if (!id) { seaf_warning ("Path %s doesn't exist in repo %s.\n", repo_path, repo_id); ret = -ENOENT; goto out; } g_free (id); if (!S_ISREG(mode)) return -EACCES; out: g_free (user); g_free (repo_id); g_free (repo_path); seaf_repo_unref (repo); seaf_commit_unref (commit); return ret; } static int seaf_fuse_read(const char *path, char *buf, size_t size, off_t offset, struct fuse_file_info *info) { int n_parts; char *user, *repo_id, *repo_path; SeafRepo *repo = NULL; SeafBranch *branch = NULL; SeafCommit *commit = NULL; Seafile *file = NULL; char *file_id = NULL; int ret = 0; /* Now we only support read-only mode */ if ((info->flags & 3) != O_RDONLY) return -EACCES; if (parse_fuse_path (path, &n_parts, &user, &repo_id, &repo_path) < 0) { seaf_warning ("Invalid input path %s.\n", path); return -ENOENT; } if (n_parts != 2 && n_parts != 3) { seaf_warning ("Invalid input path for open: %s.\n", path); ret = -EACCES; goto out; } repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id); if (!repo) { seaf_warning ("Failed to get repo %s.\n", repo_id); ret = -ENOENT; goto out; } branch = repo->head; commit = seaf_commit_manager_get_commit(seaf->commit_mgr, repo->id, repo->version, branch->commit_id); if (!commit) { seaf_warning ("Failed to get commit %s:%.8s.\n", repo->id, branch->commit_id); ret = -ENOENT; goto out; } file_id = seaf_fs_manager_get_seafile_id_by_path(seaf->fs_mgr, repo->store_id, repo->version, commit->root_id, repo_path, NULL); if (!file_id) { seaf_warning ("Path %s doesn't exist in repo %s.\n", repo_path, repo_id); ret = -ENOENT; goto out; } file = seaf_fs_manager_get_seafile(seaf->fs_mgr, repo->store_id, repo->version, file_id); if (!file) { ret = -ENOENT; goto out; } ret = read_file(seaf, 
repo->store_id, repo->version, file, buf, size, offset, info); seafile_unref (file); out: g_free (user); g_free (repo_id); g_free (repo_path); g_free (file_id); seaf_repo_unref (repo); seaf_commit_unref (commit); return ret; } struct options { char *central_config_dir; char *config_dir; char *seafile_dir; char *log_file; } options; #define SEAF_FUSE_OPT_KEY(t, p, v) { t, offsetof(struct options, p), v } enum { KEY_VERSION, KEY_HELP, }; static struct fuse_opt seaf_fuse_opts[] = { SEAF_FUSE_OPT_KEY("-c %s", config_dir, 0), SEAF_FUSE_OPT_KEY("--config %s", config_dir, 0), SEAF_FUSE_OPT_KEY("-F %s", central_config_dir, 0), SEAF_FUSE_OPT_KEY("--central-config-dir %s", central_config_dir, 0), SEAF_FUSE_OPT_KEY("-d %s", seafile_dir, 0), SEAF_FUSE_OPT_KEY("--seafdir %s", seafile_dir, 0), SEAF_FUSE_OPT_KEY("-l %s", log_file, 0), SEAF_FUSE_OPT_KEY("--logfile %s", log_file, 0), FUSE_OPT_KEY("-V", KEY_VERSION), FUSE_OPT_KEY("--version", KEY_VERSION), FUSE_OPT_KEY("-h", KEY_HELP), FUSE_OPT_KEY("--help", KEY_HELP), FUSE_OPT_END }; static struct fuse_operations seaf_fuse_ops = { .getattr = seaf_fuse_getattr, .readdir = seaf_fuse_readdir, .open = seaf_fuse_open, .read = seaf_fuse_read, }; int main(int argc, char *argv[]) { struct fuse_args args = FUSE_ARGS_INIT(argc, argv); const char *debug_str = NULL; char *config_dir = DEFAULT_CONFIG_DIR; char *central_config_dir = NULL; char *seafile_dir = NULL; char *logfile = NULL; char *ccnet_debug_level_str = "info"; char *seafile_debug_level_str = "debug"; int ret; memset(&options, 0, sizeof(struct options)); if (fuse_opt_parse(&args, &options, seaf_fuse_opts, NULL) == -1) { seaf_warning("Parse argument Failed\n"); exit(1); } #if !GLIB_CHECK_VERSION(2,36,0) g_type_init(); #endif config_dir = options.config_dir ? 
: DEFAULT_CONFIG_DIR; config_dir = ccnet_expand_path (config_dir); central_config_dir = options.central_config_dir; if (!debug_str) debug_str = g_getenv("SEAFILE_DEBUG"); seafile_debug_set_flags_string(debug_str); if (!options.seafile_dir) seafile_dir = g_build_filename(config_dir, "seafile", NULL); else seafile_dir = options.seafile_dir; if (!options.log_file) logfile = g_build_filename(seafile_dir, "seaf-fuse.log", NULL); else logfile = options.log_file; if (seafile_log_init(logfile, ccnet_debug_level_str, seafile_debug_level_str, "seaf-fuse") < 0) { fprintf (stderr, "Failed to init log.\n"); exit(1); } seaf = seafile_session_new(central_config_dir, seafile_dir, config_dir); if (!seaf) { seaf_warning("Failed to create seafile session.\n"); exit(1); } if (seafile_session_init(seaf) < 0) { seaf_warning("Failed to init seafile session.\n"); exit(1); } set_syslog_config (seaf->config); ret = fuse_main(args.argc, args.argv, &seaf_fuse_ops, NULL); fuse_opt_free_args(&args); return ret; } ================================================ FILE: fuse/seaf-fuse.h ================================================ #ifndef SEAF_FUSE_H #define SEAF_FUSE_H #include "seafile-session.h" int parse_fuse_path (const char *path, int *n_parts, char **user, char **repo_id, char **repo_path); SeafDirent * fuse_get_dirent_by_path (SeafFSManager *mgr, const char *repo_id, int version, const char *root_id, const char *path); /* file.c */ int read_file(SeafileSession *seaf, const char *store_id, int version, Seafile *file, char *buf, size_t size, off_t offset, struct fuse_file_info *info); /* getattr.c */ int do_getattr(SeafileSession *seaf, const char *path, struct stat *stbuf); /* readdir.c */ int do_readdir(SeafileSession *seaf, const char *path, void *buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info *info); #endif /* SEAF_FUSE_H */ ================================================ FILE: fuse/seafile-session.c ================================================ #include 
"common.h" #include #include #include #include #include #include "seafile-session.h" #include "seaf-utils.h" #include "log.h" static int read_excluded_users (SeafileSession *session); SeafileSession * seafile_session_new(const char *central_config_dir, const char *seafile_dir, const char *ccnet_dir) { char *abs_central_config_dir = NULL; char *abs_seafile_dir; char *abs_ccnet_dir = NULL; char *tmp_file_dir; char *config_file_path; struct stat st; GKeyFile *config; SeafileSession *session = NULL; abs_ccnet_dir = ccnet_expand_path (ccnet_dir); abs_seafile_dir = ccnet_expand_path (seafile_dir); tmp_file_dir = g_build_filename(abs_seafile_dir, "tmpfiles", NULL); if (central_config_dir) { abs_central_config_dir = ccnet_expand_path (central_config_dir); } config_file_path = g_build_filename( abs_central_config_dir ? abs_central_config_dir : abs_seafile_dir, "seafile.conf", NULL); if (g_stat(abs_seafile_dir, &st) < 0 || !S_ISDIR(st.st_mode)) { seaf_warning ("Seafile data dir %s does not exist and is unable to create\n", abs_seafile_dir); goto onerror; } if (g_stat(tmp_file_dir, &st) < 0 || !S_ISDIR(st.st_mode)) { seaf_warning("Seafile tmp dir %s does not exist and is unable to create\n", tmp_file_dir); goto onerror; } if (g_stat(abs_ccnet_dir, &st) < 0 || !S_ISDIR(st.st_mode)) { seaf_warning("Ccnet dir %s does not exist and is unable to create\n", abs_ccnet_dir); goto onerror; } GError *error = NULL; config = g_key_file_new (); if (!g_key_file_load_from_file (config, config_file_path, G_KEY_FILE_NONE, &error)) { seaf_warning ("Failed to load config file.\n"); g_free (config_file_path); g_key_file_free (config); goto onerror; } g_free (config_file_path); session = g_new0(SeafileSession, 1); session->seaf_dir = abs_seafile_dir; session->ccnet_dir = abs_ccnet_dir; session->tmp_file_dir = tmp_file_dir; session->config = config; session->excluded_users = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); if (load_database_config (session) < 0) { seaf_warning 
("Failed to load database config.\n"); goto onerror; } if (load_ccnet_database_config (session) < 0) { seaf_warning ("Failed to load ccnet database config.\n"); goto onerror; } if (read_excluded_users (session) < 0) { seaf_warning ("Failed to load excluded users.\n"); goto onerror; } session->fs_mgr = seaf_fs_manager_new (session, abs_seafile_dir); if (!session->fs_mgr) goto onerror; session->block_mgr = seaf_block_manager_new (session, abs_seafile_dir); if (!session->block_mgr) goto onerror; session->commit_mgr = seaf_commit_manager_new (session); if (!session->commit_mgr) goto onerror; session->repo_mgr = seaf_repo_manager_new (session); if (!session->repo_mgr) goto onerror; session->branch_mgr = seaf_branch_manager_new (session); if (!session->branch_mgr) goto onerror; session->user_mgr = ccnet_user_manager_new (session); if (!session->user_mgr) goto onerror; session->group_mgr = ccnet_group_manager_new (session); if (!session->group_mgr) goto onerror; return session; onerror: free (abs_seafile_dir); free (abs_ccnet_dir); g_free (session); return NULL; } static int read_excluded_users (SeafileSession *session) { char *users; int l, i; char *hash_value; users = seaf_key_file_get_string (session->config, "fuse", "excluded_users", NULL); if (!users) return 0; char **parts = g_strsplit_set(users, " ,", 0); l = g_strv_length(parts); if (l > 0) hash_value = g_new0(char, 1); for (i = 0; i < l; i++) { if (g_strcmp0(parts[i], "") == 0) continue; g_hash_table_insert (session->excluded_users, g_strdup(parts[i]), hash_value); } g_strfreev (parts); g_free (users); return 0; } int seafile_session_init (SeafileSession *session) { if (seaf_commit_manager_init (session->commit_mgr) < 0) return -1; if (seaf_fs_manager_init (session->fs_mgr) < 0) return -1; if (seaf_branch_manager_init (session->branch_mgr) < 0) return -1; if (seaf_repo_manager_init (session->repo_mgr) < 0) return -1; if (ccnet_user_manager_prepare (session->user_mgr) < 0) { seaf_warning ("Failed to init user 
manager.\n"); return -1; } if (ccnet_group_manager_prepare (session->group_mgr) < 0) { seaf_warning ("Failed to init group manager.\n"); return -1; } return 0; } int seafile_session_start (SeafileSession *session) { return 0; } ================================================ FILE: fuse/seafile-session.h ================================================ #ifndef SEAFILE_SESSION_H #define SEAFILE_SESSION_H #include #include #include #include "block-mgr.h" #include "fs-mgr.h" #include "branch-mgr.h" #include "commit-mgr.h" #include "repo-mgr.h" #include "user-mgr.h" #include "group-mgr.h" #include "org-mgr.h" typedef struct _SeafileSession SeafileSession; struct _SeafileSession { char *seaf_dir; char *ccnet_dir; char *tmp_file_dir; /* Config that's only loaded on start */ GKeyFile *config; SeafDB *db; SeafDB *ccnet_db; SeafDB *seahub_db; SeafBlockManager *block_mgr; SeafFSManager *fs_mgr; SeafBranchManager *branch_mgr; SeafCommitManager *commit_mgr; SeafRepoManager *repo_mgr; CcnetUserManager *user_mgr; CcnetGroupManager *group_mgr; CcnetOrgManager *org_mgr; GHashTable *excluded_users; gboolean create_tables; gboolean ccnet_create_tables; }; extern SeafileSession *seaf; SeafileSession * seafile_session_new(const char *central_config_dir, const char *seafile_dir, const char *ccnet_dir); int seafile_session_init (SeafileSession *session); int seafile_session_start (SeafileSession *session); #endif ================================================ FILE: include/Makefile.am ================================================ noinst_HEADERS = seafile-rpc.h seafile-error.h ================================================ FILE: include/seafile-error.h ================================================ #ifndef SEAFILE_ERROR_H #define SEAFILE_ERROR_H #define SEAF_ERR_GENERAL 500 #define SEAF_ERR_BAD_REPO 501 #define SEAF_ERR_BAD_COMMIT 502 #define SEAF_ERR_BAD_ARGS 503 #define SEAF_ERR_INTERNAL 504 #define SEAF_ERR_BAD_FILE 505 #define SEAF_ERR_BAD_RELAY 506 #define 
SEAF_ERR_LIST_COMMITS 507 #define SEAF_ERR_REPO_AUTH 508 #define SEAF_ERR_GC_NOT_STARTED 509 #define SEAF_ERR_MONITOR_NOT_CONNECTED 510 #define SEAF_ERR_BAD_DIR_ID 511 #define SEAF_ERR_NO_WORKTREE 512 #define SEAF_ERR_BAD_PEER_ID 513 #define SEAF_ERR_REPO_LOCKED 514 #define SEAF_ERR_DIR_MISSING 515 #define SEAF_ERR_PATH_NO_EXIST 516 /* the dir or file pointed by this path not exists */ #define POST_FILE_ERR_FILENAME 517 #define POST_FILE_ERR_BLOCK_MISSING 518 #define POST_FILE_ERR_QUOTA_FULL 519 #define SEAF_ERR_CONCURRENT_UPLOAD 520 #define SEAF_ERR_FILES_WITH_SAME_NAME 521 #define SEAF_ERR_GC_CONFLICT 522 #endif ================================================ FILE: include/seafile-rpc.h ================================================ #ifndef _SEAFILE_RPC_H #define _SEAFILE_RPC_H #include "seafile-object.h" /** * seafile_get_session_info: * * Returns: a SeafileSessionInfo object. */ GObject * seafile_get_session_info (GError **error); /** * seafile_get_repo_list: * * Returns repository list. */ GList* seafile_get_repo_list (int start, int limit, const char *order_by, int ret_virt_repo, GError **error); gint64 seafile_count_repos (GError **error); /** * seafile_get_trash_repo_list: * * Returns deleted repository list. */ GList* seafile_get_trash_repo_list(int start, int limit, GError **error); int seafile_del_repo_from_trash (const char *repo_id, GError **error); int seafile_restore_repo_from_trash (const char *repo_id, GError **error); GList * seafile_get_trash_repos_by_owner (const char *owner, GError **error); int seafile_empty_repo_trash (GError **error); int seafile_empty_repo_trash_by_owner (const char *owner, GError **error); /** * seafile_get_commit_list: * * @limit: if limit <= 0, all commits start from @offset will be returned. * * Returns: commit list of a given repo. * * Possible Error: * 1. Bad Argument * 2. No head and branch master * 3. 
Failed to list commits */ GList* seafile_get_commit_list (const gchar *repo, int offset, int limit, GError **error); /** * seafile_get_commit: * @id: the commit id. * * Returns: the commit object. */ GObject* seafile_get_commit (const char *repo_id, int version, const gchar *id, GError **error); /** * seafile_get_repo: * * Returns: repo */ GObject* seafile_get_repo (const gchar* id, GError **error); GObject * seafile_get_repo_sync_task (const char *repo_id, GError **error); /** * seafile_get_repo_sync_info: */ GObject * seafile_get_repo_sync_info (const char *repo_id, GError **error); GList* seafile_get_repo_sinfo (const char *repo_id, GError **error); /* [seafile_get_config] returns the value of the config entry whose name is * [key] in config.db */ char *seafile_get_config (const char *key, GError **error); /* [seafile_set_config] set the value of config key in config.db; old value * would be overwritten. */ int seafile_set_config (const char *key, const char *value, GError **error); int seafile_set_config_int (const char *key, int value, GError **error); int seafile_get_config_int (const char *key, GError **error); int seafile_set_upload_rate_limit (int limit, GError **error); int seafile_set_download_rate_limit (int limit, GError **error); /** * seafile_destroy_repo: * @repo_id: repository id. */ int seafile_destroy_repo (const gchar *repo_id, GError **error); int seafile_unsync_repos_by_account (const char *server_addr, const char *email, GError **error); int seafile_remove_repo_tokens_by_account (const char *server_addr, const char *email, GError **error); int seafile_set_repo_token (const char *repo_id, const char *token, GError **error); int seafile_get_download_rate(GError **error); int seafile_get_upload_rate(GError **error); /** * seafile_edit_repo: * @repo_id: repository id. * @name: new name of the repository, NULL if unchanged. * @description: new description of the repository, NULL if unchanged. 
*/ int seafile_edit_repo (const gchar *repo_id, const gchar *name, const gchar *description, const gchar *user, GError **error); int seafile_change_repo_passwd (const char *repo_id, const char *old_passwd, const char *new_passwd, const char *user, GError **error); int seafile_upgrade_repo_pwd_hash_algorithm (const char *repo_id, const char *user, const char *passwd, const char *pwd_hash_algo, const char *pwd_hash_params, GError **error); /** * seafile_repo_size: * * Returns: the size of a repo * * Possible Error: * 1. Bad Argument * 2. No local branch (No local branch record in branch.db) * 3. Database error * 4. Calculate branch size error */ gint64 seafile_repo_size(const gchar *repo_id, GError **error); int seafile_repo_last_modify(const char *repo_id, GError **error); int seafile_set_repo_lantoken (const gchar *repo_id, const gchar *token, GError **error); gchar* seafile_get_repo_lantoken (const gchar *repo_id, GError **error); int seafile_set_repo_property (const char *repo_id, const char *key, const char *value, GError **error); gchar * seafile_get_repo_property (const char *repo_id, const char *key, GError **error); char * seafile_get_repo_relay_address (const char *repo_id, GError **error); char * seafile_get_repo_relay_port (const char *repo_id, GError **error); int seafile_update_repo_relay_info (const char *repo_id, const char *new_addr, const char *new_port, GError **error); int seafile_update_repos_server_host (const char *old_host, const char *new_host, const char *new_server_url, GError **error); int seafile_disable_auto_sync (GError **error); int seafile_enable_auto_sync (GError **error); int seafile_is_auto_sync_enabled (GError **error); char * seafile_get_path_sync_status (const char *repo_id, const char *path, int is_dir, GError **error); int seafile_mark_file_locked (const char *repo_id, const char *path, GError **error); int seafile_mark_file_unlocked (const char *repo_id, const char *path, GError **error); char * seafile_get_server_property 
(const char *server_url, const char *key, GError **error); int seafile_set_server_property (const char *server_url, const char *key, const char *value, GError **error); /** * seafile_list_dir: * List a directory. * * Returns: a list of dirents. * * @limit: if limit <= 0, all dirents start from @offset will be returned. */ GList * seafile_list_dir (const char *repo_id, const char *dir_id, int offset, int limit, GError **error); /** * seafile_list_file_blocks: * List the blocks of a file. * * Returns: a list of block ids speprated by '\n'. * * @limit: if limit <= 0, all blocks start from @offset will be returned. */ char * seafile_list_file_blocks (const char *repo_id, const char *file_id, int offset, int limit, GError **error); /** * seafile_list_dir_by_path: * List a directory in a commit by the path of the directory. * * Returns: a list of dirents. */ GList * seafile_list_dir_by_path (const char *repo_id, const char *commit_id, const char *path, GError **error); /** * seafile_get_dir_id_by_commit_and_path: * Get the dir_id of the path * * Returns: the dir_id of the path */ char * seafile_get_dir_id_by_commit_and_path (const char *repo_id, const char *commit_id, const char *path, GError **error); /** * seafile_revert: * Reset the repo to a previous state by creating a new commit. */ int seafile_revert (const char *repo_id, const char *commit, GError **error); char * seafile_gen_default_worktree (const char *worktree_parent, const char *repo_name, GError **error); int seafile_check_path_for_clone(const char *path, GError **error); /** * seafile_clone: * * Fetch a new repo and then check it out. 
*/ char * seafile_clone (const char *repo_id, int repo_version, const char *peer_id, const char *repo_name, const char *worktree, const char *token, const char *passwd, const char *magic, const char *peer_addr, const char *peer_port, const char *email, const char *random_key, int enc_version, const char *more_info, GError **error); char * seafile_download (const char *repo_id, int repo_version, const char *peer_id, const char *repo_name, const char *wt_parent, const char *token, const char *passwd, const char *magic, const char *peer_addr, const char *peer_port, const char *email, const char *random_key, int enc_version, const char *more_info, GError **error); int seafile_cancel_clone_task (const char *repo_id, GError **error); int seafile_remove_clone_task (const char *repo_id, GError **error); /** * seafile_get_clone_tasks: * * Get a list of clone tasks. */ GList * seafile_get_clone_tasks (GError **error); /** * seafile_sync: * * Sync a repo with relay. */ int seafile_sync (const char *repo_id, const char *peer_id, GError **error); /** * seafile_get_total_block_size: * * Get the sum of size of all the blocks. */ gint64 seafile_get_total_block_size (GError **error); /** * seafile_get_commit_tree_block_number: * * Get the number of blocks belong to the commit tree. * * @commit_id: the head of the commit tree. * * Returns: -1 if the calculation is in progress, -2 if error, >=0 otherwise. */ int seafile_get_commit_tree_block_number (const char *commit_id, GError **error); /** * seafile_gc: * Start garbage collection. */ int seafile_gc (GError **error); /** * seafile_gc_get_progress: * Get progress of GC. * * Returns: * progress of GC in precentage. * -1 if GC is not running. 
*/ /* int */ /* seafile_gc_get_progress (GError **error); */ /* ----------------- Task Related -------------- */ /** * seafile_find_transfer: * * Find a non finished task of a repo */ GObject * seafile_find_transfer_task (const char *repo_id, GError *error); int seafile_cancel_task (const gchar *task_id, int task_type, GError **error); /** * Remove finished upload task */ int seafile_remove_task (const char *task_id, int task_type, GError **error); /* ------------------ Relay specific RPC calls. ------------ */ /** * seafile_diff: * * Show the difference between @old commit and @new commit. If @old is NULL, then * show the difference between @new commit and its parent. * * @old and @new can also be branch name. */ GList * seafile_diff (const char *repo_id, const char *old, const char *new, int fold_dir_results, GError **error); GList * seafile_branch_gets (const char *repo_id, GError **error); /** * Return 1 if user is the owner of repo, otherwise return 0. */ int seafile_is_repo_owner (const char *email, const char *repo_id, GError **error); int seafile_set_repo_owner(const char *repo_id, const char *email, GError **error); /** * Return owner id of repo */ char * seafile_get_repo_owner(const char *repo_id, GError **error); GList * seafile_get_orphan_repo_list(GError **error); GList * seafile_list_owned_repos (const char *email, int ret_corrupted, int start, int limit, GError **error); GList * seafile_search_repos_by_name(const char *name, GError **error); /** * seafile_add_chunk_server: * @server: ID for the chunk server. * * Add a chunk server on a relay server. */ int seafile_add_chunk_server (const char *server, GError **error); /** * seafile_del_chunk_server: * @server: ID for the chunk server. * * Delete a chunk server on a relay server. */ int seafile_del_chunk_server (const char *server, GError **error); /** * seafile_list_chunk_servers: * * List chunk servers set on a relay server. 
*/ char *seafile_list_chunk_servers (GError **error); gint64 seafile_get_user_quota_usage (const char *email, GError **error); gint64 seafile_get_user_share_usage (const char *email, GError **error); gint64 seafile_server_repo_size(const char *repo_id, GError **error); int seafile_repo_set_access_property (const char *repo_id, const char *ap, GError **error); char * seafile_repo_query_access_property (const char *repo_id, GError **error); char * seafile_web_get_access_token (const char *repo_id, const char *obj_id, const char *op, const char *username, int use_onetime, GError **error); GObject * seafile_web_query_access_token (const char *token, GError **error); char * seafile_query_zip_progress (const char *token, GError **error); int seafile_cancel_zip_task (const char *token, GError **error); GObject * seafile_get_checkout_task (const char *repo_id, GError **error); GList * seafile_get_sync_task_list (GError **error); char * seafile_share_subdir_to_user (const char *repo_id, const char *path, const char *owner, const char *share_user, const char *permission, const char *passwd, GError **error); int seafile_unshare_subdir_for_user (const char *repo_id, const char *path, const char *owner, const char *share_user, GError **error); int seafile_update_share_subdir_perm_for_user (const char *repo_id, const char *path, const char *owner, const char *share_user, const char *permission, GError **error); int seafile_add_share (const char *repo_id, const char *from_email, const char *to_email, const char *permission, GError **error); GList * seafile_list_share_repos (const char *email, const char *type, int start, int limit, GError **error); GList * seafile_list_repo_shared_to (const char *from_user, const char *repo_id, GError **error); GList * seafile_list_repo_shared_group (const char *from_user, const char *repo_id, GError **error); int seafile_remove_share (const char *repo_id, const char *from_email, const char *to_email, GError **error); char * 
seafile_share_subdir_to_group (const char *repo_id, const char *path, const char *owner, int share_group, const char *permission, const char *passwd, GError **error); int seafile_unshare_subdir_for_group (const char *repo_id, const char *path, const char *owner, int share_group, GError **error); int seafile_update_share_subdir_perm_for_group (const char *repo_id, const char *path, const char *owner, int share_group, const char *permission, GError **error); int seafile_group_share_repo (const char *repo_id, int group_id, const char *user_name, const char *permission, GError **error); int seafile_group_unshare_repo (const char *repo_id, int group_id, const char *user_name, GError **error); /* Get groups that a repo is shared to */ char * seafile_get_shared_groups_by_repo(const char *repo_id, GError **error); char * seafile_get_group_repoids (int group_id, GError **error); GList * seafile_get_repos_by_group (int group_id, GError **error); GList * seafile_get_group_repos_by_owner (char *user, GError **error); char * seafile_get_group_repo_owner (const char *repo_id, GError **error); int seafile_remove_repo_group(int group_id, const char *username, GError **error); gint64 seafile_get_file_size (const char *store_id, int version, const char *file_id, GError **error); gint64 seafile_get_dir_size (const char *store_id, int version, const char *dir_id, GError **error); int seafile_set_repo_history_limit (const char *repo_id, int days, GError **error); int seafile_get_repo_history_limit (const char *repo_id, GError **error); int seafile_set_repo_valid_since (const char *repo_id, gint64 timestamp, GError **error); int seafile_check_passwd (const char *repo_id, const char *magic, GError **error); int seafile_set_passwd (const char *repo_id, const char *user, const char *passwd, GError **error); int seafile_unset_passwd (const char *repo_id, const char *user, GError **error); int seafile_is_passwd_set (const char *repo_id, const char *user, GError **error); GObject * 
seafile_get_decrypt_key (const char *repo_id, const char *user, GError **error); int seafile_revert_on_server (const char *repo_id, const char *commit_id, const char *user_name, GError **error); /** * Add a file into the repo on server. * The content of the file is stored in a temporary file. * @repo_id: repo id * @temp_file_path: local file path, should be a temp file just uploaded. * @parent_dir: the parent directory to put the file in. * @file_name: the name of the target file. * @user: the email of the user who uploaded the file. */ int seafile_post_file (const char *repo_id, const char *temp_file_path, const char *parent_dir, const char *file_name, const char *user, GError **error); /** * Add multiple files at once. * * @filenames_json: json array of filenames * @paths_json: json array of temp file paths */ char * seafile_post_multi_files (const char *repo_id, const char *parent_dir, const char *filenames_json, const char *paths_json, const char *user, int replace, GError **error); /** * Add file blocks at once. * * @blocks_json: json array of block ids * @paths_json: json array of temp file paths */ /* char * */ /* seafile_post_file_blocks (const char *repo_id, */ /* const char *parent_dir, */ /* const char *file_name, */ /* const char *blockids_json, */ /* const char *paths_json, */ /* const char *user, */ /* gint64 file_size, */ /* int replace_existed, */ /* GError **error); */ int seafile_post_empty_file (const char *repo_id, const char *parent_dir, const char *new_file_name, const char *user, GError **error); /** * Update an existing file in a repo * @params: same as seafile_post_file * @head_id: the commit id for the original file version. * It's optional. If it's NULL, the current repo head will be used. * @return The new file id */ char * seafile_put_file (const char *repo_id, const char *temp_file_path, const char *parent_dir, const char *file_name, const char *user, const char *head_id, GError **error); /** * Add file blocks at once. 
* * @blocks_json: json array of block ids * @paths_json: json array of temp file paths */ /* char * */ /* seafile_put_file_blocks (const char *repo_id, const char *parent_dir, */ /* const char *file_name, const char *blockids_json, */ /* const char *paths_json, const char *user, */ /* const char *head_id, gint64 file_size, GError **error); */ int seafile_post_dir (const char *repo_id, const char *parent_dir, const char *new_dir_name, const char *user, GError **error); int seafile_mkdir_with_parents (const char *repo_id, const char *parent_dir, const char *new_dir_path, const char *user, GError **error); /** * delete a file/directory from the repo on server. * @repo_id: repo id * @parent_dir: the parent directory of the file to be deleted * @file_name: the name of the target file. * @user: the email of the user who uploaded the file. */ int seafile_del_file (const char *repo_id, const char *parent_dir, const char *file_name, const char *user, GError **error); int seafile_batch_del_files (const char *repo_id, const char *file_list, const char *user, GError **error); /** * copy a file/directory from a repo to another on server. */ GObject * seafile_copy_file (const char *src_repo_id, const char *src_dir, const char *src_filename, const char *dst_repo_id, const char *dst_dir, const char *dst_filename, const char *user, int need_progress, int synchronous, GError **error); GObject * seafile_move_file (const char *src_repo_id, const char *src_dir, const char *src_filename, const char *dst_repo_id, const char *dst_dir, const char *dst_filename, int replace, const char *user, int need_progress, int synchronous, GError **error); GObject * seafile_get_copy_task (const char *task_id, GError **error); int seafile_cancel_copy_task (const char *task_id, GError **error); int seafile_rename_file (const char *repo_id, const char *parent_dir, const char *oldname, const char *newname, const char *user, GError **error); /** * Return non-zero if filename is valid. 
*/ int seafile_is_valid_filename (const char *repo_id, const char *filename, GError **error); int seafile_set_user_quota (const char *user, gint64 quota, GError **error); gint64 seafile_get_user_quota (const char *user, GError **error); int seafile_check_quota (const char *repo_id, gint64 delta, GError **error); GList * seafile_list_user_quota_usage (GError **error); char * seafile_get_file_id_by_path (const char *repo_id, const char *path, GError **error); char * seafile_get_dir_id_by_path (const char *repo_id, const char *path, GError **error); GObject * seafile_get_dirent_by_path (const char *repo_id, const char *path, GError **error); /** * Return a list of commits where every commit contains a unique version of * the file. */ GList * seafile_list_file_revisions (const char *repo_id, const char *commit_id, const char *path, int limit, GError **error); GList * seafile_calc_files_last_modified (const char *repo_id, const char *parent_dir, int limit, GError **error); int seafile_revert_file (const char *repo_id, const char *commit_id, const char *path, const char *user, GError **error); int seafile_revert_dir (const char *repo_id, const char *commit_id, const char *path, const char *user, GError **error); char * seafile_check_repo_blocks_missing (const char *repo_id, const char *blockids_json, GError **error); /* * @show_days: return deleted files in how many days, return all if 0. 
*/ GList * seafile_get_deleted (const char *repo_id, int show_days, const char *path, const char *scan_stat, int limit, GError **error); /** * Generate a new token for (repo_id, email) and return it */ char * seafile_generate_repo_token (const char *repo_id, const char *email, GError **error); int seafile_delete_repo_token (const char *repo_id, const char *token, const char *user, GError **error); GList * seafile_list_repo_tokens (const char *repo_id, GError **error); GList * seafile_list_repo_tokens_by_email (const char *email, GError **error); int seafile_delete_repo_tokens_by_peer_id(const char *email, const char *peer_id, GError **error); int seafile_delete_repo_tokens_by_email (const char *email, GError **error); /** * create a repo on seahub */ char * seafile_create_repo (const char *repo_name, const char *repo_desc, const char *owner_email, const char *passwd, int enc_version, const char *pwd_hash_algo, const char *pwd_hash_params, GError **error); char * seafile_create_enc_repo (const char *repo_id, const char *repo_name, const char *repo_desc, const char *owner_email, const char *magic, const char *random_key, const char *salt, int enc_version, const char *pwd_hash, const char *pwd_hash_algo, const char *pwd_hash_params, GError **error); char * seafile_check_permission (const char *repo_id, const char *user, GError **error); char * seafile_check_permission_by_path (const char *repo_id, const char *path, const char *user, GError **error); GList * seafile_list_dir_with_perm (const char *repo_id, const char *path, const char *dir_id, const char *user, int offset, int limit, GError **error); int seafile_set_inner_pub_repo (const char *repo_id, const char *permission, GError **error); int seafile_unset_inner_pub_repo (const char *repo_id, GError **error); GList * seafile_list_inner_pub_repos (GError **error); gint64 seafile_count_inner_pub_repos (GError **error); GList * seafile_list_inner_pub_repos_by_owner (const char *user, GError **error); int 
seafile_is_inner_pub_repo (const char *repo_id, GError **error); int seafile_set_share_permission (const char *repo_id, const char *from_email, const char *to_email, const char *permission, GError **error); int seafile_set_group_repo_permission (int group_id, const char *repo_id, const char *permission, GError **error); char * seafile_get_file_id_by_commit_and_path(const char *repo_id, const char *commit_id, const char *path, GError **error); /* virtual repo related */ char * seafile_create_virtual_repo (const char *origin_repo_id, const char *path, const char *repo_name, const char *repo_desc, const char *owner, const char *passwd, GError **error); GList * seafile_get_virtual_repos_by_owner (const char *owner, GError **error); GObject * seafile_get_virtual_repo (const char *origin_repo, const char *path, const char *owner, GError **error); char * seafile_get_system_default_repo_id (GError **error); /* Clean trash */ int seafile_clean_up_repo_history (const char *repo_id, int keep_days, GError **error); /* ------------------ public RPC calls. 
------------ */ GList* seafile_get_repo_list_pub (int start, int limit, GError **error); GObject* seafile_get_repo_pub (const gchar* id, GError **error); GList* seafile_get_commit_list_pub (const gchar *repo, int offset, int limit, GError **error); GObject* seafile_get_commit_pub (const gchar *id, GError **error); char *seafile_diff_pub (const char *repo_id, const char *old, const char *new, GError **error); GList * seafile_list_dir_pub (const char *dir_id, GError **error); GList * seafile_get_shared_users_for_subdir (const char *repo_id, const char *path, const char *from_user, GError **error); GList * seafile_get_shared_groups_for_subdir (const char *repo_id, const char *path, const char *from_user, GError **error); GObject * seafile_generate_magic_and_random_key(int enc_version, const char* repo_id, const char *passwd, GError **error); gint64 seafile_get_total_file_number (GError **error); gint64 seafile_get_total_storage (GError **error); GObject * seafile_get_file_count_info_by_path (const char *repo_id, const char *path, GError **error); char * seafile_get_trash_repo_owner (const char *repo_id, GError **error); int seafile_set_server_config_int (const char *group, const char *key, int value, GError **error); int seafile_get_server_config_int (const char *group, const char *key, GError **error); int seafile_set_server_config_int64 (const char *group, const char *key, gint64 value, GError **error); gint64 seafile_get_server_config_int64 (const char *group, const char *key, GError **error); int seafile_set_server_config_string (const char *group, const char *key, const char *value, GError **error); char * seafile_get_server_config_string (const char *group, const char *key, GError **error); int seafile_set_server_config_boolean (const char *group, const char *key, int value, GError **error); int seafile_get_server_config_boolean (const char *group, const char *key, GError **error); GObject * seafile_get_group_shared_repo_by_path (const char *repo_id, const char 
*path, int group_id, int is_org, GError **error); GObject * seafile_get_shared_repo_by_path (const char *repo_id, const char *path, const char *shared_to, int is_org, GError **error); GList * seafile_get_group_repos_by_user (const char *user, GError **error); GList * seafile_get_org_group_repos_by_user (const char *user, int org_id, GError **error); int seafile_repo_has_been_shared (const char *repo_id, int including_groups, GError **error); GList * seafile_get_shared_users_by_repo (const char *repo_id, GError **error); GList * seafile_org_get_shared_users_by_repo (int org_id, const char *repo_id, GError **error); gint64 seafile_get_upload_tmp_file_offset (const char *repo_id, const char *file_path, GError **error); char * seafile_convert_repo_path (const char *repo_id, const char *path, const char *user, int is_org, GError **error); int seafile_set_repo_status(const char *repo_id, int status, GError **error); int seafile_get_repo_status(const char *repo_id, GError **error); GList* seafile_get_repos_by_id_prefix (const char *id_prefix, int start, int limit, GError **error); int seafile_publish_event(const char *channel, const char *content, GError **error); json_t * seafile_pop_event(const char *channel, GError **error); GList * seafile_search_files (const char *repo_id, const char *str, GError **error); GList * seafile_search_files_by_path (const char *repo_id, const char *path, const char *str, GError **error); /*Following is ccnet rpc*/ int ccnet_rpc_add_emailuser (const char *email, const char *passwd, int is_staff, int is_active, GError **error); int ccnet_rpc_remove_emailuser (const char *source, const char *email, GError **error); int ccnet_rpc_validate_emailuser (const char *email, const char *passwd, GError **error); GObject* ccnet_rpc_get_emailuser (const char *email, GError **error); GObject* ccnet_rpc_get_emailuser_with_import (const char *email, GError **error); GObject* ccnet_rpc_get_emailuser_by_id (int id, GError **error); GList* 
ccnet_rpc_get_emailusers (const char *source, int start, int limit, const char *status, GError **error); GList* ccnet_rpc_search_emailusers (const char *source, const char *email_patt, int start, int limit, GError **error); GList* ccnet_rpc_search_ldapusers (const char *keyword, int start, int limit, GError **error); /* Get total counts of email users. */ gint64 ccnet_rpc_count_emailusers (const char *source, GError **error); gint64 ccnet_rpc_count_inactive_emailusers (const char *source, GError **error); int ccnet_rpc_update_emailuser (const char *source, int id, const char* passwd, int is_staff, int is_active, GError **error); int ccnet_rpc_update_role_emailuser (const char* email, const char* role, GError **error); GList* ccnet_rpc_get_superusers (GError **error); GList * ccnet_rpc_get_emailusers_in_list(const char *source, const char *user_list, GError **error); int ccnet_rpc_update_emailuser_id (const char *old_email, const char *new_email, GError **error); int ccnet_rpc_create_group (const char *group_name, const char *user_name, const char *type, int parent_group_id, GError **error); int ccnet_rpc_create_org_group (int org_id, const char *group_name, const char *user_name, int parent_group_id, GError **error); int ccnet_rpc_remove_group (int group_id, GError **error); int ccnet_rpc_group_add_member (int group_id, const char *user_name, const char *member_name, GError **error); int ccnet_rpc_group_remove_member (int group_id, const char *user_name, const char *member_name, GError **error); int ccnet_rpc_group_set_admin (int group_id, const char *member_name, GError **error); int ccnet_rpc_group_unset_admin (int group_id, const char *member_name, GError **error); int ccnet_rpc_set_group_name (int group_id, const char *group_name, GError **error); int ccnet_rpc_quit_group (int group_id, const char *user_name, GError **error); GList * ccnet_rpc_get_groups (const char *username, int return_ancestors, GError **error); GList * ccnet_rpc_list_all_departments (GError 
**error); GList * ccnet_rpc_get_all_groups (int start, int limit, const char *source, GError **error); GList * ccnet_rpc_get_ancestor_groups (int group_id, GError **error); GList * ccnet_rpc_get_top_groups (int including_org, GError **error); GList * ccnet_rpc_get_child_groups (int group_id, GError **error); GList * ccnet_rpc_get_descendants_groups(int group_id, GError **error); GObject * ccnet_rpc_get_group (int group_id, GError **error); GList * ccnet_rpc_get_group_members (int group_id, int start, int limit, GError **error); GList * ccnet_rpc_get_members_with_prefix(int group_id, const char *prefix, GError **error); int ccnet_rpc_check_group_staff (int group_id, const char *user_name, int in_structure, GError **error); int ccnet_rpc_remove_group_user (const char *user, GError **error); int ccnet_rpc_is_group_user (int group_id, const char *user, int in_structure, GError **error); int ccnet_rpc_set_group_creator (int group_id, const char *user_name, GError **error); GList* ccnet_rpc_search_groups (const char *group_patt, int start, int limit, GError **error); GList * ccnet_rpc_get_groups_members (const char *group_ids, GError **error); GList * ccnet_rpc_search_group_members (int group_id, const char *pattern, GError **error); int ccnet_rpc_create_org (const char *org_name, const char *url_prefix, const char *creator, GError **error); int ccnet_rpc_remove_org (int org_id, GError **error); GList * ccnet_rpc_get_all_orgs (int start, int limit, GError **error); gint64 ccnet_rpc_count_orgs (GError **error); GObject * ccnet_rpc_get_org_by_url_prefix (const char *url_prefix, GError **error); GObject * ccnet_rpc_get_org_by_id (int org_id, GError **error); int ccnet_rpc_add_org_user (int org_id, const char *email, int is_staff, GError **error); int ccnet_rpc_remove_org_user (int org_id, const char *email, GError **error); GList * ccnet_rpc_get_orgs_by_user (const char *email, GError **error); GList * ccnet_rpc_get_org_emailusers (const char *url_prefix, int start , int 
limit, GError **error); int ccnet_rpc_add_org_group (int org_id, int group_id, GError **error); int ccnet_rpc_remove_org_group (int org_id, int group_id, GError **error); int ccnet_rpc_is_org_group (int group_id, GError **error); int ccnet_rpc_get_org_id_by_group (int group_id, GError **error); GList * ccnet_rpc_get_org_groups (int org_id, int start, int limit, GError **error); GList * ccnet_rpc_get_org_groups_by_user (const char *user, int org_id, GError **error); GList * ccnet_rpc_get_org_top_groups (int org_id, GError **error); int ccnet_rpc_org_user_exists (int org_id, const char *email, GError **error); int ccnet_rpc_is_org_staff (int org_id, const char *email, GError **error); int ccnet_rpc_set_org_staff (int org_id, const char *email, GError **error); int ccnet_rpc_unset_org_staff (int org_id, const char *email, GError **error); int ccnet_rpc_set_org_name (int org_id, const char *org_name, GError **error); int ccnet_rpc_set_reference_id (const char *primary_id, const char *reference_id, GError **error); char * ccnet_rpc_get_primary_id (const char *email, GError **error); #endif ================================================ FILE: lib/Makefile.am ================================================ pcfiles = libseafile.pc pkgconfig_DATA = $(pcfiles) pkgconfigdir = $(libdir)/pkgconfig AM_CPPFLAGS = @GLIB2_CFLAGS@ -I$(top_srcdir)/include \ -I$(top_srcdir)/lib \ -I$(top_srcdir)/common \ @SEARPC_CFLAGS@ \ @MSVC_CFLAGS@ \ -Wall BUILT_SOURCES = gensource ## source file rules seafile_object_define = repo.vala commit.vala dirent.vala dir.vala \ task.vala branch.vala crypt.vala webaccess.vala seahub.vala copy-task.vala ccnetobj.vala search-result.vala seafile_object_gen = $(seafile_object_define:.vala=.c) valac_gen = ${seafile_object_gen} seafile-object.h EXTRA_DIST = ${seafile_object_define} rpc_table.py $(pcfiles) vala.stamp utils_headers = net.h bloom-filter.h utils.h db.h job-mgr.h timer.h utils_srcs = $(utils_headers:.h=.c) noinst_HEADERS = ${utils_headers} 
include.h seafiledir = $(includedir)/seafile seafile_HEADERS = seafile-object.h seafile-object.h: ${seafile_object_define} rm -f $@ valac --pkg posix ${seafile_object_define} -C -H seafile-object.h DISTCLEANFILES = ${searpc_gen} ## library rules noinst_LTLIBRARIES = libseafile_common.la libseafile_common_la_SOURCES = ${seafile_object_gen} ${utils_srcs} libseafile_common_la_LDFLAGS = -no-undefined libseafile_common_la_LIBADD = @GLIB2_LIBS@ @GOBJECT_LIBS@ @SSL_LIBS@ -lcrypto @LIB_GDI32@ \ @LIB_UUID@ @LIB_WS32@ @LIB_PSAPI@ -lsqlite3 \ @LIBEVENT_LIBS@ @SEARPC_LIBS@ @LIB_SHELL32@ \ @ZLIB_LIBS@ searpc_gen = searpc-signature.h searpc-marshal.h gensource: ${searpc_gen} ${valac_gen} rpc_table.stamp: ${top_srcdir}/lib/rpc_table.py @rm -f rpc_table.tmp @touch rpc_table.tmp @echo "[libsearpc]: generating rpc header files" @PYTHON@ `which searpc-codegen.py` ${top_srcdir}/lib/rpc_table.py @echo "[libsearpc]: done" @mv -f rpc_table.tmp $@ ${searpc_gen}: rpc_table.stamp vala.stamp: ${seafile_object_define} rm -f ${seafile_object_gen} @rm -f vala.tmp @touch vala.tmp valac -C --pkg posix $^ @mv -f vala.tmp $@ ${seafile_object_gen}: vala.stamp clean-local: rm -f ${searpc_gen} rm -f rpc_table.pyc rm -f rpc_table.stamp rm -f rpc_table.tmp rm -f vala.tmp vala.stamp ${valac_gen} install-data-local: if MACOS sed -i '' -e "s|(DESTDIR)|${DESTDIR}|g" $(pcfiles) else ${SED} -i "s|(DESTDIR)|${DESTDIR}|g" $(pcfiles) endif ================================================ FILE: lib/bloom-filter.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include #include #include #include #include #include "bloom-filter.h" #define SETBIT(a, n) (a[n/CHAR_BIT] |= (1<<(n%CHAR_BIT))) #define CLEARBIT(a, n) (a[n/CHAR_BIT] &= ~(1<<(n%CHAR_BIT))) #define GETBIT(a, n) (a[n/CHAR_BIT] & (1<<(n%CHAR_BIT))) Bloom* bloom_create(size_t size, int k, int counting) { Bloom *bloom; size_t csize = 0; if (k <=0 || k > 4) return NULL; if ( !(bloom = 
malloc(sizeof(Bloom))) ) return NULL; if ( !(bloom->a = calloc((size+CHAR_BIT-1)/CHAR_BIT, sizeof(char))) ) { free (bloom); return NULL; } if (counting) { csize = size*4; bloom->counters = calloc((csize+CHAR_BIT-1)/CHAR_BIT, sizeof(char)); if (!bloom->counters) { free (bloom); return NULL; } } bloom->asize = size; bloom->csize = csize; bloom->k = k; bloom->counting = counting; return bloom; } int bloom_destroy(Bloom *bloom) { free (bloom->a); if (bloom->counting) free (bloom->counters); free (bloom); return 0; } static void incr_bit (Bloom *bf, unsigned int bit_idx) { unsigned int char_idx, offset; unsigned char value; unsigned int high; unsigned int low; SETBIT (bf->a, bit_idx); if (!bf->counting) return; char_idx = bit_idx / 2; offset = bit_idx % 2; value = bf->counters[char_idx]; low = value & 0xF; high = (value & 0xF0) >> 4; if (offset == 0) { if (low < 0xF) low++; } else { if (high < 0xF) high++; } value = ((high << 4) | low); bf->counters[char_idx] = value; } static void decr_bit (Bloom *bf, unsigned int bit_idx) { unsigned int char_idx, offset; unsigned char value; unsigned int high; unsigned int low; if (!bf->counting) { CLEARBIT (bf->a, bit_idx); return; } char_idx = bit_idx / 2; offset = bit_idx % 2; value = bf->counters[char_idx]; low = value & 0xF; high = (value & 0xF0) >> 4; /* decrement, but once we have reached the max, never go back! 
*/
    if (offset == 0) {
        if ((low > 0) && (low < 0xF))
            low--;
        if (low == 0) {
            CLEARBIT (bf->a, bit_idx);
        }
    } else {
        if ((high > 0) && (high < 0xF))
            high--;
        if (high == 0) {
            CLEARBIT (bf->a, bit_idx);
        }
    }
    value = ((high << 4) | low);
    bf->counters[char_idx] = value;
}

/* Add string `s`: derive k slot indices from its SHA-256 digest and
 * set/increment each.  Always returns 0.
 * NOTE(review): the digest is reinterpreted as size_t words; with k <= 4
 * (enforced by bloom_create) and 8-byte size_t this stays inside the
 * 32-byte digest — confirm on platforms with larger size_t. */
int bloom_add(Bloom *bloom, const char *s)
{
    int i;
    SHA256_CTX c;
    unsigned char sha256[SHA256_DIGEST_LENGTH];
    size_t *sha_int = (size_t *)&sha256;

    SHA256_Init(&c);
    SHA256_Update(&c, s, strlen(s));
    SHA256_Final (sha256, &c);

    for (i=0; i < bloom->k; ++i)
        incr_bit (bloom, sha_int[i] % bloom->asize);

    return 0;
}

/* Remove `s` from a counting filter by decrementing the k counters
 * derived from its SHA-256 digest.  Returns -1 when the filter is not
 * counting (removal unsupported), 0 otherwise. */
int bloom_remove(Bloom *bloom, const char *s)
{
    int i;
    SHA256_CTX c;
    unsigned char sha256[SHA256_DIGEST_LENGTH];
    size_t *sha_int = (size_t *)&sha256;

    if (!bloom->counting)
        return -1;

    SHA256_Init(&c);
    SHA256_Update(&c, s, strlen(s));
    SHA256_Final (sha256, &c);

    for (i=0; i < bloom->k; ++i)
        decr_bit (bloom, sha_int[i] % bloom->asize);

    return 0;
}

/* Membership test: returns 1 when all k derived bits are set (element
 * possibly present), 0 when any is clear (definitely absent). */
int bloom_test(Bloom *bloom, const char *s)
{
    int i;
    SHA256_CTX c;
    unsigned char sha256[SHA256_DIGEST_LENGTH];
    size_t *sha_int = (size_t *)&sha256;

    SHA256_Init(&c);
    SHA256_Update(&c, s, strlen(s));
    SHA256_Final (sha256, &c);

    for (i=0; i < bloom->k; ++i)
        if(!(GETBIT(bloom->a, sha_int[i] % bloom->asize)))
            return 0;

    return 1;
}


================================================
FILE: lib/bloom-filter.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef __BLOOM_H__
#define __BLOOM_H__

#include

/* Bloom filter, optionally "counting" so elements can also be removed. */
typedef struct {
    size_t asize;            /* number of bit slots */
    unsigned char *a;        /* bit array, one bit per slot */
    size_t csize;            /* counter bits (asize * 4) when counting */
    unsigned char *counters; /* 4-bit saturating counters, two per byte */
    int k;                   /* hash slots per element (1..4) */
    char counting:1;         /* non-zero => counting filter */
} Bloom;

Bloom *bloom_create (size_t size, int k, int counting);
int bloom_destroy (Bloom *bloom);
int bloom_add (Bloom *bloom, const char *s);
int bloom_remove (Bloom *bloom, const char *s);
int bloom_test (Bloom *bloom, const char *s);

#endif


================================================
FILE: lib/branch.vala
================================================
// compile this file with
`valac --pkg posix repo.vala -C -H repo.h` namespace Seafile { public class Branch : Object { public string _name; public string name { get { return _name; } set { _name = value; } } public string _commit_id; public string commit_id { get { return _commit_id; } set { _commit_id = value; } } public string _repo_id; public string repo_id { get { return _repo_id; } set { _repo_id = value; } } } } // namespace ================================================ FILE: lib/ccnetobj.vala ================================================ namespace Ccnet { public class EmailUser : Object { public int id { get; set; } public string email { get; set; } public bool is_staff { get; set; } public bool is_active { get; set; } public int64 ctime { get; set; } public string source { get; set; } public string role { get; set; } public string password { get; set; } public string reference_id { get; set; } } public class Group : Object { public int id { get; set; } public string group_name { get; set; } public string creator_name { get; set; } public int64 timestamp { get; set; } public string source { get; set; } public int parent_group_id { get; set; } } public class GroupUser : Object { public int group_id { get; set; } public string user_name { get; set; } public int is_staff { get; set; } } public class Organization : Object { public int org_id { get; set; } public string email { get; set; } public int is_staff { get; set; } public string org_name { get; set; } public string url_prefix { get; set; } public string creator { get; set; } public int64 ctime { get; set; } } } // namespace ================================================ FILE: lib/commit.vala ================================================ // compile this file with `valac --pkg posix repo.vala -C -H repo.h` namespace Seafile { public class Commit : Object { // _id is for fast access from c code. id is for // vala to automatically generate a property. 
Note, // if a Vala property is start with _, it is not // translated into a GObject property. public char _id[41]; public string id { get { return (string)_id; } set { Posix.memcpy(_id, value, 40); _id[40] = '\0'; } } public string creator_name { get; set; } public string _creator; // creator public string creator { get { return _creator; } set { _creator = value; } } public string _desc; // description: what does this commit change public string desc { get { return _desc; } set { _desc = value; } } public int64 _ctime; // create time public int64 ctime { get { return _ctime; } set { _ctime = value; } } public string parent_id { get; set;} public string second_parent_id { get; set; } public string _repo_id; public string repo_id { get { return _repo_id; } set { _repo_id = value; } } // A commit point to a file or dir, not both. public string _root_id; public string root_id { get { return _root_id; } set { _root_id = value; } } // Repo data-format version of this commit public int version { get; set; } public bool new_merge { get; set; } public bool conflict { get; set; } // Used for returning file revision public string rev_file_id { get; set; } public int64 rev_file_size { get; set; } // Set if this commit renames a revision of a file public string rev_renamed_old_path { get; set; } public string device_name { get; set; } public string client_version { get; set; } //Only used for file history pagination public string next_start_commit { get; set; } } } // namespace ================================================ FILE: lib/copy-task.vala ================================================ namespace Seafile { public class CopyTask : Object { public int64 done { set; get; } public int64 total { set; get; } public bool canceled { set; get; } public bool failed { set; get; } public string failed_reason { set; get; } public bool successful { set; get; } } public class CopyResult : Object { public bool background { set; get; } public string task_id { set; get; } } } 
================================================ FILE: lib/crypt.vala ================================================ namespace Seafile { public class CryptKey : Object { public string key { set; get; } public string iv { set; get; } } } ================================================ FILE: lib/db.c ================================================ #include #include #include "db.h" int sqlite_open_db (const char *db_path, sqlite3 **db) { int result; const char *errmsg; result = sqlite3_open (db_path, db); if (result) { errmsg = sqlite3_errmsg (*db); g_warning ("Couldn't open database:'%s', %s\n", db_path, errmsg ? errmsg : "no error given"); sqlite3_close (*db); return -1; } return 0; } int sqlite_close_db (sqlite3 *db) { return sqlite3_close (db); } sqlite3_stmt * sqlite_query_prepare (sqlite3 *db, const char *sql) { sqlite3_stmt *stmt; int result; result = sqlite3_prepare_v2 (db, sql, -1, &stmt, NULL); if (result != SQLITE_OK) { const gchar *str = sqlite3_errmsg (db); g_warning ("Couldn't prepare query, error:%d->'%s'\n\t%s\n", result, str ? str : "no error given", sql); return NULL; } return stmt; } int sqlite_query_exec (sqlite3 *db, const char *sql) { char *errmsg = NULL; int result; result = sqlite3_exec (db, sql, NULL, NULL, &errmsg); if (result != SQLITE_OK) { if (errmsg != NULL) { g_warning ("SQL error: %d - %s\n:\t%s\n", result, errmsg, sql); sqlite3_free (errmsg); } return -1; } return 0; } int sqlite_begin_transaction (sqlite3 *db) { char *sql = "BEGIN TRANSACTION;"; return sqlite_query_exec (db, sql); } int sqlite_end_transaction (sqlite3 *db) { char *sql = "END TRANSACTION;"; return sqlite_query_exec (db, sql); } gboolean sqlite_check_for_existence (sqlite3 *db, const char *sql) { sqlite3_stmt *stmt; int result; stmt = sqlite_query_prepare (db, sql); if (!stmt) return FALSE; result = sqlite3_step (stmt); if (result == SQLITE_ERROR) { const gchar *str = sqlite3_errmsg (db); g_warning ("Couldn't execute query, error: %d->'%s'\n", result, str ? 
str : "no error given"); sqlite3_finalize (stmt); return FALSE; } sqlite3_finalize (stmt); if (result == SQLITE_ROW) return TRUE; return FALSE; } int sqlite_foreach_selected_row (sqlite3 *db, const char *sql, SqliteRowFunc callback, void *data) { sqlite3_stmt *stmt; int result; int n_rows = 0; stmt = sqlite_query_prepare (db, sql); if (!stmt) { return -1; } while (1) { result = sqlite3_step (stmt); if (result != SQLITE_ROW) break; n_rows++; if (!callback (stmt, data)) break; } if (result == SQLITE_ERROR) { const gchar *s = sqlite3_errmsg (db); g_warning ("Couldn't execute query, error: %d->'%s'\n", result, s ? s : "no error given"); sqlite3_finalize (stmt); return -1; } sqlite3_finalize (stmt); return n_rows; } int sqlite_get_int (sqlite3 *db, const char *sql) { int ret = -1; int result; sqlite3_stmt *stmt; if ( !(stmt = sqlite_query_prepare(db, sql)) ) return 0; result = sqlite3_step (stmt); if (result == SQLITE_ROW) { ret = sqlite3_column_int (stmt, 0); sqlite3_finalize (stmt); return ret; } if (result == SQLITE_ERROR) { const gchar *str = sqlite3_errmsg (db); g_warning ("Couldn't execute query, error: %d->'%s'\n", result, str ? str : "no error given"); sqlite3_finalize (stmt); return -1; } sqlite3_finalize(stmt); return ret; } gint64 sqlite_get_int64 (sqlite3 *db, const char *sql) { gint64 ret = -1; int result; sqlite3_stmt *stmt; if ( !(stmt = sqlite_query_prepare(db, sql)) ) return 0; result = sqlite3_step (stmt); if (result == SQLITE_ROW) { ret = sqlite3_column_int64 (stmt, 0); sqlite3_finalize (stmt); return ret; } if (result == SQLITE_ERROR) { const gchar *str = sqlite3_errmsg (db); g_warning ("Couldn't execute query, error: %d->'%s'\n", result, str ? 
str : "no error given"); sqlite3_finalize (stmt); return -1; } sqlite3_finalize(stmt); return ret; } char *sqlite_get_string (sqlite3 *db, const char *sql) { const char *res = NULL; int result; sqlite3_stmt *stmt; char *ret; if ( !(stmt = sqlite_query_prepare(db, sql)) ) return NULL; result = sqlite3_step (stmt); if (result == SQLITE_ROW) { res = (const char *)sqlite3_column_text (stmt, 0); ret = g_strdup(res); sqlite3_finalize (stmt); return ret; } if (result == SQLITE_ERROR) { const gchar *str = sqlite3_errmsg (db); g_warning ("Couldn't execute query, error: %d->'%s'\n", result, str ? str : "no error given"); sqlite3_finalize (stmt); return NULL; } sqlite3_finalize(stmt); return NULL; } ================================================ FILE: lib/db.h ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef DB_UTILS_H #define DB_UTILS_H #include int sqlite_open_db (const char *db_path, sqlite3 **db); int sqlite_close_db (sqlite3 *db); sqlite3_stmt *sqlite_query_prepare (sqlite3 *db, const char *sql); int sqlite_query_exec (sqlite3 *db, const char *sql); int sqlite_begin_transaction (sqlite3 *db); int sqlite_end_transaction (sqlite3 *db); gboolean sqlite_check_for_existence (sqlite3 *db, const char *sql); typedef gboolean (*SqliteRowFunc) (sqlite3_stmt *stmt, void *data); int sqlite_foreach_selected_row (sqlite3 *db, const char *sql, SqliteRowFunc callback, void *data); int sqlite_get_int (sqlite3 *db, const char *sql); gint64 sqlite_get_int64 (sqlite3 *db, const char *sql); char *sqlite_get_string (sqlite3 *db, const char *sql); #endif ================================================ FILE: lib/dir.vala ================================================ namespace Seafile { public class Dir : Object { // _id is for fast access from c code. id is for // vala to automatically generate a property. Note, // if a Vala property is start with _, it is not // translated into a GObject property. 
public char _id[41]; public string id { get { return (string)_id; } set { Posix.memcpy(_id, value, 40); _id[40] = '\0'; } } public List entries; public int version { set; get; } } public class FileCountInfo : Object { public int64 file_count { set; get; } public int64 dir_count { set; get; } public int64 size { set; get; } } } // namespace ================================================ FILE: lib/dirent.vala ================================================ namespace Seafile { public class Dirent : Object { // _id is for fast access from c code. id is for // vala to automatically generate a property. Note, // if a Vala property is start with _, it is not // translated into a GObject property. public string obj_id { set; get; } public string obj_name { set; get; } public int mode { set; get; } public int version { set; get; } public int64 mtime { set; get; } public int64 size { set; get; } public string modifier { set; get;} public string permission { set; get; } public bool is_locked { set; get; } public string lock_owner { set; get; } public int64 lock_time { set; get; } public bool is_shared { set; get; } } public class FileLastModifiedInfo : Object { public string file_name { set; get; } public int64 last_modified { set; get; } } } // namespace ================================================ FILE: lib/file.vala ================================================ namespace Seafile { public class File : Object { // _id is for fast access from c code. id is for // vala to automatically generate a property. Note, // if a Vala property is start with _, it is not // translated into a GObject property. 
public char _id[41];
        public string id {
            get { return (string)_id; }
            // Fix: the original copied from `id` — the property being set,
            // i.e. itself — instead of the Vala setter keyword `value`.
            // Commit and Dir use `value` here; File now matches them.
            set {
                Posix.memcpy(_id, value, 40);
                _id[40] = '\0';
            }
        }

        public uint64 size;
    }
} // namespace


================================================
FILE: lib/include.h
================================================
#include
#include
#include
#include
#include
#include
#include
#include "utils.h"

#ifndef ccnet_warning
#define ccnet_warning(fmt, ...) g_warning( "%s: " fmt, __func__ , ##__VA_ARGS__)
#endif

#ifndef ccnet_error
#define ccnet_error(fmt, ...) g_error( "%s: " fmt, __func__ , ##__VA_ARGS__)
#endif

#ifndef ccnet_message
#define ccnet_message(fmt, ...) g_message(fmt, ##__VA_ARGS__)
#endif

#ifndef ccnet_debug
#define ccnet_debug(fmt, ...) g_debug(fmt, ##__VA_ARGS__)
#endif

/* Compile g_debug out entirely unless debugging is enabled. */
#ifndef ENABLE_DEBUG
#undef g_debug
#define g_debug(...)
#endif


================================================
FILE: lib/job-mgr.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
#include
#include
#else
#include
#endif

#include
#include
#include
#include

#define MAX_THREADS 50
#define MAX_IDLE_THREADS 10

#include "utils.h"
#include "job-mgr.h"

/* One background job: thread_func runs in a pool thread, and completion
 * is signalled back to the event loop through a pipe. */
struct _CcnetJob {
    CcnetJobManager *manager;

    int id;
    ccnet_pipe_t pipefd[2];

    JobThreadFunc thread_func;
    JobDoneCallback done_func;  /* called when the thread is done */
    void *data;                 /* the done callback should only access this field */
    void *result;
};

void
ccnet_job_manager_remove_job (CcnetJobManager *mgr, int job_id);

/* Pool-thread side: run the job, stash its result, poke the pipe so the
 * event loop fires job_done_cb. */
static void
job_thread_wrapper (void *vdata, void *unused)
{
    CcnetJob *job = vdata;

    job->result = job->thread_func (job->data);
    if (pipewriten (job->pipefd[1], "a", 1) != 1) {
        g_warning ("[Job Manager] write to pipe error: %s\n", strerror(errno));
    }
}

/* Event-loop side: drain the notification byte, close the pipe, invoke
 * the done callback, and drop the job from the manager. */
static void
job_done_cb (evutil_socket_t fd, short event, void *vdata)
{
    CcnetJob *job = vdata;
    char buf[1];

    if (pipereadn (job->pipefd[0], buf, 1) != 1) {
        g_warning ("[Job
Manager] read pipe error: %s\n", strerror(errno)); } pipeclose (job->pipefd[0]); pipeclose (job->pipefd[1]); if (job->done_func) { job->done_func (job->result); } ccnet_job_manager_remove_job (job->manager, job->id); } int job_thread_create (CcnetJob *job) { if (ccnet_pipe (job->pipefd) < 0) { g_warning ("pipe error: %s\n", strerror(errno)); return -1; } g_thread_pool_push (job->manager->thread_pool, job, NULL); #ifndef UNIT_TEST event_once (job->pipefd[0], EV_READ, job_done_cb, job, NULL); #endif return 0; } CcnetJob * ccnet_job_new () { CcnetJob *job; job = g_new0 (CcnetJob, 1); return job; } void ccnet_job_free (CcnetJob *job) { g_free (job); } CcnetJobManager * ccnet_job_manager_new (int max_threads) { CcnetJobManager *mgr; mgr = g_new0 (CcnetJobManager, 1); mgr->jobs = g_hash_table_new_full (g_direct_hash, g_direct_equal, NULL, (GDestroyNotify)ccnet_job_free); mgr->thread_pool = g_thread_pool_new (job_thread_wrapper, NULL, max_threads, FALSE, NULL); /* g_thread_pool_set_max_unused_threads (MAX_IDLE_THREADS); */ return mgr; } void ccnet_job_manager_free (CcnetJobManager *mgr) { g_hash_table_destroy (mgr->jobs); g_thread_pool_free (mgr->thread_pool, TRUE, FALSE); g_free (mgr); } int ccnet_job_manager_schedule_job (CcnetJobManager *mgr, JobThreadFunc func, JobDoneCallback done_func, void *data) { CcnetJob *job = ccnet_job_new (); job->id = mgr->next_job_id++; job->manager = mgr; job->thread_func = func; job->done_func = done_func; job->data = data; g_hash_table_insert (mgr->jobs, (gpointer)(long)job->id, job); if (job_thread_create (job) < 0) { g_hash_table_remove (mgr->jobs, (gpointer)(long)job->id); return -1; } return job->id; } void ccnet_job_manager_remove_job (CcnetJobManager *mgr, int job_id) { g_hash_table_remove (mgr->jobs, (gpointer)(long)job_id); } #ifdef UNIT_TEST void ccnet_job_manager_wait_job (CcnetJobManager *mgr, int job_id) { CcnetJob *job; job = g_hash_table_lookup (mgr->jobs, (gpointer)(long)job_id); /* manually call job_done_cb */ job_done_cb 
(0, 0, (void *)job); } #endif ================================================ FILE: lib/job-mgr.h ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ /** * Job Manager manages long term jobs. These jobs are run in their * own threads. */ #ifndef JOB_MGR_H #define JOB_MGR_H #include struct _CcnetSession; typedef struct _CcnetJob CcnetJob; typedef struct _CcnetJobManager CcnetJobManager; /* The thread func should return the result back by return (void *)result; The result will be passed to JobDoneCallback. */ typedef void* (*JobThreadFunc)(void *data); typedef void (*JobDoneCallback)(void *result); struct _CcnetJobManager { GHashTable *jobs; GThreadPool *thread_pool; int next_job_id; }; void ccnet_job_cancel (CcnetJob *job); CcnetJobManager * ccnet_job_manager_new (int max_threads); void ccnet_job_manager_free (CcnetJobManager *mgr); int ccnet_job_manager_schedule_job (CcnetJobManager *mgr, JobThreadFunc func, JobDoneCallback done_func, void *data); /** * Wait a specific job to be done. */ void ccnet_job_manager_wait_job (CcnetJobManager *mgr, int job_id); #endif ================================================ FILE: lib/libseafile.pc.in ================================================ prefix=(DESTDIR)@prefix@ exec_prefix=@exec_prefix@ libdir=@libdir@ includedir=@includedir@ Name: libseafile Description: Client library for accessing seafile service. 
Version: @VERSION@ Libs: -L${libdir} -lseafile @SEARPC_LIBS@ Cflags: -I${includedir} @SEARPC_CFLAGS@ Requires: gobject-2.0 glib-2.0 ================================================ FILE: lib/net.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifdef WIN32 #define WINVER 0x0501 #include #include #include #include #endif #include "include.h" #include #include #include #include #include #ifdef WIN32 #define UNUSED #else #include #include #include #include #include #include #include #include #include #endif #include #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) #include #else #include #endif #include "net.h" #ifdef WIN32 #ifndef inet_aton int inet_aton(const char *string, struct in_addr *addr) { addr->s_addr = inet_addr(string); if (addr->s_addr != -1 || strcmp("255.255.255.255", string) == 0) return 1; return 0; } #endif #endif //WIN32 int ccnet_netSetTOS (evutil_socket_t s, int tos) { #ifdef IP_TOS return setsockopt( s, IPPROTO_IP, IP_TOS, (char*)&tos, sizeof( tos ) ); #else return 0; #endif } static evutil_socket_t makeSocketNonBlocking (evutil_socket_t fd) { if (fd >= 0) { if (evutil_make_socket_nonblocking(fd)) { ccnet_warning ("Couldn't make socket nonblock: %s", evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR())); evutil_closesocket(fd); fd = -1; } } return fd; } static evutil_socket_t createSocket (int family, int nonblock) { evutil_socket_t fd; int ret; fd = socket (family, SOCK_STREAM, 0); if (fd < 0) { ccnet_warning("create Socket failed %d\n", fd); } else if (nonblock) { int nodelay = 1; fd = makeSocketNonBlocking( fd ); ret = setsockopt (fd, IPPROTO_TCP, TCP_NODELAY, (char *)&nodelay, sizeof(nodelay)); if (ret < 0) { ccnet_warning("setsockopt failed\n"); evutil_closesocket(fd); return -1; } } return fd; } evutil_socket_t ccnet_net_open_tcp (const struct sockaddr *sa, int nonblock) { evutil_socket_t s; int sa_len; if( (s = createSocket(sa->sa_family, 
nonblock)) < 0 ) return -1; #ifndef WIN32 if (sa->sa_family == AF_INET) sa_len = sizeof (struct sockaddr_in); else sa_len = sizeof (struct sockaddr_in6); #else if (sa->sa_family == AF_INET) sa_len = sizeof (struct sockaddr_in); else return -1; #endif if( (connect(s, sa, sa_len) < 0) #ifdef WIN32 && (sockerrno != WSAEWOULDBLOCK) #endif && (sockerrno != EINPROGRESS) ) { evutil_closesocket(s); s = -1; } return s; } evutil_socket_t ccnet_net_bind_tcp (int port, int nonblock) { #ifndef WIN32 int sockfd, n; struct addrinfo hints, *res, *ressave; char buf[10]; memset (&hints, 0,sizeof (struct addrinfo)); hints.ai_flags = AI_PASSIVE; hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_STREAM; snprintf (buf, sizeof(buf), "%d", port); if ( (n = getaddrinfo(NULL, buf, &hints, &res) ) != 0) { ccnet_warning ("getaddrinfo fails: %s\n", gai_strerror(n)); return -1; } ressave = res; do { int on = 1; sockfd = socket(res->ai_family, res->ai_socktype, res->ai_protocol); if (sockfd < 0) continue; /* error - try next one */ if (setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) < 0) { ccnet_warning ("setsockopt of SO_REUSEADDR error\n"); continue; } if (nonblock) sockfd = makeSocketNonBlocking (sockfd); if (sockfd < 0) continue; /* error - try next one */ if (bind(sockfd, res->ai_addr, res->ai_addrlen) == 0) break; /* success */ close(sockfd); /* bind error - close and try next one */ } while ( (res = res->ai_next) != NULL); freeaddrinfo (ressave); if (res == NULL) { ccnet_warning ("bind fails: %s\n", strerror(errno)); return -1; } return sockfd; #else evutil_socket_t s; struct sockaddr_in sock; const int type = AF_INET; #if defined( SO_REUSEADDR ) || defined( SO_REUSEPORT ) int optval; #endif if ((s = createSocket(type, nonblock)) < 0) return -1; optval = 1; setsockopt (s, SOL_SOCKET, SO_REUSEADDR, (char*)&optval, sizeof(optval)); memset(&sock, 0, sizeof(sock)); sock.sin_family = AF_INET; sock.sin_addr.s_addr = INADDR_ANY; sock.sin_port = htons(port); if ( bind(s, (struct 
sockaddr *)&sock, sizeof(struct sockaddr_in)) < 0) { ccnet_warning ("bind fails: %s\n", strerror(errno)); evutil_closesocket (s); return -1; } if (nonblock) s = makeSocketNonBlocking (s); return s; #endif } int ccnet_net_make_socket_blocking(evutil_socket_t fd) { #ifdef WIN32 { u_long nonblocking = 0; if (ioctlsocket(fd, FIONBIO, &nonblocking) == SOCKET_ERROR) { ccnet_warning ("fcntl(%d, F_GETFL)", (int)fd); return -1; } } #else { int flags; if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) { ccnet_warning ("fcntl(%d, F_GETFL)", fd); return -1; } if (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) == -1) { ccnet_warning ("fcntl(%d, F_SETFL)", fd); return -1; } } #endif return 0; } evutil_socket_t ccnet_net_accept (evutil_socket_t b, struct sockaddr_storage *cliaddr, socklen_t *len, int nonblock) { evutil_socket_t s; /* int nodelay = 1; */ s = accept (b, (struct sockaddr *)cliaddr, len); /* setsockopt (s, IPPROTO_TCP, TCP_NODELAY, &nodelay, sizeof(nodelay)); */ if (nonblock) makeSocketNonBlocking(s); return s; } evutil_socket_t ccnet_net_bind_v4 (const char *ipaddr, int *port) { evutil_socket_t sockfd; struct sockaddr_in addr; int on = 1; sockfd = socket (AF_INET, SOCK_STREAM, 0); if (sockfd < 0) { ccnet_warning("create socket failed: %s\n", strerror(errno)); exit(-1); } memset (&addr, 0, sizeof (struct sockaddr_in)); addr.sin_family = AF_INET; if (inet_aton(ipaddr, &addr.sin_addr) == 0) { ccnet_warning ("Bad ip address %s\n", ipaddr); return -1; } addr.sin_port = htons (*port); if (setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, (char *)&on, sizeof(on)) < 0) { ccnet_warning ("setsockopt of SO_REUSEADDR error: %s\n", strerror(errno)); return -1; } if ( bind(sockfd, (struct sockaddr *)&addr, sizeof(addr)) < 0) { ccnet_warning ("Bind error: %s\n", strerror (errno)); return -1; } if (*port == 0) { struct sockaddr_storage ss; socklen_t len; len = sizeof(ss); if (getsockname(sockfd, (struct sockaddr *)&ss, &len) < 0) { ccnet_warning ("getsockname error: %s\n", strerror(errno)); return 
-1; } *port = sock_port ((struct sockaddr *)&ss); } return sockfd; } char * sock_ntop(const struct sockaddr *sa, socklen_t salen) { static char str[128]; /* Unix domain is largest */ switch (sa->sa_family) { case AF_INET: { struct sockaddr_in *sin = (struct sockaddr_in *) sa; if (evutil_inet_ntop(AF_INET, &sin->sin_addr, str, sizeof(str)) == NULL) return(NULL); return(str); } #ifdef IPv6 case AF_INET6: { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) sa; if (evutil_inet_ntop(AF_INET6, &sin6->sin6_addr, str, sizeof(str) - 1) == NULL) return(NULL); return (str); } #endif #ifndef WIN32 #ifdef AF_UNIX case AF_UNIX: { struct sockaddr_un *unp = (struct sockaddr_un *) sa; /* OK to have no pathname bound to the socket: happens on every connect() unless client calls bind() first. */ if (unp->sun_path[0] == 0) strcpy(str, "(no pathname bound)"); else snprintf(str, sizeof(str), "%s", unp->sun_path); return(str); } #endif #endif default: snprintf(str, sizeof(str), "sock_ntop: unknown AF_xxx: %d, len %d", sa->sa_family, salen); return(str); } return (NULL); } int sock_pton (const char *addr_str, uint16_t port, struct sockaddr_storage *sa) { struct sockaddr_in *saddr = (struct sockaddr_in *) sa; #ifndef WIN32 struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *) sa; #endif if (evutil_inet_pton (AF_INET, addr_str, &saddr->sin_addr) == 1 ) { saddr->sin_family = AF_INET; saddr->sin_port = htons (port); return 0; } #ifndef WIN32 else if (evutil_inet_pton (AF_INET6, addr_str, &saddr6->sin6_addr) == 1) { saddr6->sin6_family = AF_INET6; saddr6->sin6_port = htons (port); return 0; } #endif return -1; } /* return 1 if addr_str is a valid ipv4 or ipv6 address */ int is_valid_ipaddr (const char *addr_str) { struct sockaddr_storage addr; if (!addr_str) return 0; if (sock_pton(addr_str, 0, &addr) < 0) return 0; return 1; } uint16_t sock_port (const struct sockaddr *sa) { switch (sa->sa_family) { case AF_INET: { struct sockaddr_in *sin = (struct sockaddr_in *) sa; return 
ntohs(sin->sin_port);
    }
/* Fix: was "#ifdef IPv6" (wrong case) — every other IPv6 guard in this
 * file spells the macro IPV6, so the AF_INET6 branch was never compiled. */
#ifdef IPV6
    case AF_INET6: {
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) sa;
        return ntohs(sin6->sin6_port);
    }
#endif
    default:
        return 0;
    }
    return 0;
}

/* Create an unconnected UDP socket for host:serv.
 * On success returns the socket and stores a malloc'ed copy of the
 * resolved address in *saptr (caller frees) and its length in *lenp.
 * Returns -1 on resolution or socket failure. */
evutil_socket_t
udp_client (const char *host, const char *serv,
            struct sockaddr **saptr, socklen_t *lenp)
{
    evutil_socket_t sockfd;
    int n;
    struct addrinfo hints, *res, *ressave;

    memset (&hints, 0, sizeof(struct addrinfo));
    hints.ai_family = AF_UNSPEC;
    hints.ai_socktype = SOCK_DGRAM;

    if ((n = getaddrinfo(host, serv, &hints, &res)) != 0) {
        ccnet_warning ("udp_client error for %s, %s: %s",
                       host, serv, gai_strerror(n));
        return -1;
    }
    ressave = res;

    /* Try each resolved address until a socket can be created. */
    do {
        sockfd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
        if (sockfd >= 0)
            break;      /* success */
    } while ( (res = res->ai_next) != NULL);

    if (res == NULL) {  /* errno set from final socket() */
        ccnet_warning ("udp_client error for %s, %s", host, serv);
        freeaddrinfo (ressave);
        return -1;
    }

    *saptr = malloc(res->ai_addrlen);
    memcpy(*saptr, res->ai_addr, res->ai_addrlen);
    *lenp = res->ai_addrlen;

    freeaddrinfo(ressave);

    return (sockfd);
}

/* Map an address family to the matching socket-option level. */
int
family_to_level(int family)
{
    switch (family) {
    case AF_INET:
        return IPPROTO_IP;
#ifdef IPV6
    case AF_INET6:
        return IPPROTO_IPV6;
#endif
    default:
        return -1;
    }
}

#ifdef WIN32

/* Winsock multicast join: set TTL, disable loopback, then join via
 * WSAJoinLeaf. Returns the leaf socket, or -1 on error. */
static int
mcast_join(evutil_socket_t sockfd, const struct sockaddr *grp, socklen_t grplen,
           const char *ifname, u_int ifindex)
{
    int optval = 3;
    int sockm;

    if (setsockopt(sockfd, IPPROTO_IP, IP_MULTICAST_TTL,
                   (char *)&optval, sizeof(int)) == SOCKET_ERROR) {
        ccnet_warning("Fail to set socket multicast TTL, LastError=%d\n",
                      WSAGetLastError());
        return -1;
    }

    optval = 0;
    if (setsockopt(sockfd, IPPROTO_IP, IP_MULTICAST_LOOP,
                   (char *)&optval, sizeof(int)) == SOCKET_ERROR) {
        ccnet_warning("Fail to set socket multicast LOOP, LastError=%d\n",
                      WSAGetLastError());
        return -1;
    }

    sockm = WSAJoinLeaf (sockfd, grp, grplen,
                         NULL, NULL, NULL, NULL, JL_BOTH);
    if (sockm == INVALID_SOCKET) {
        ccnet_warning("Fail to join multicast group, LastError=%d\n",
                      WSAGetLastError());
        return -1;
    }

    return sockm;
}

/* Create a socket bound to INADDR_ANY on sasend's port and joined to
 * the multicast group in sasend. Returns the socket or -1. */
evutil_socket_t
create_multicast_sock (struct sockaddr *sasend, socklen_t salen)
{
    int ret;
    const int on = 1;
    evutil_socket_t recvfd;
    struct sockaddr *sarecv;

    recvfd = WSASocket (AF_INET, SOCK_DGRAM, 0, NULL, 0,
                        WSA_FLAG_MULTIPOINT_C_LEAF|WSA_FLAG_MULTIPOINT_D_LEAF
                        |WSA_FLAG_OVERLAPPED);
    if (recvfd < 0) {
        ccnet_warning ("Create multicast listen socket fails: %d\n",
                       WSAGetLastError());
        return -1;
    }

    ret = setsockopt(recvfd, SOL_SOCKET, SO_REUSEADDR,
                     (char *)&on, sizeof(on));
    if (ret != 0) {
        ccnet_warning("Failed to setsockopt SO_REUSEADDR, WSAGetLastError=%d\n",
                      WSAGetLastError());
        closesocket (recvfd);   /* fix: don't leak the socket */
        return -1;
    }

    sarecv = malloc(salen);
    memcpy(sarecv, sasend, salen);
    struct sockaddr_in *saddr = (struct sockaddr_in *)sarecv;
    saddr->sin_addr.s_addr = INADDR_ANY;

    if (bind(recvfd, sarecv, salen) < 0) {
        ccnet_warning("Bind multicast bind socket failed LastError=%d\n",
                      WSAGetLastError());
        free (sarecv);
        closesocket (recvfd);   /* fix: don't leak the socket */
        return -1;              /* fix: stray ";;" removed */
    }
    free (sarecv);

    if (mcast_join(recvfd, sasend, salen, NULL, 0) < 0) {
        ccnet_warning ("mcast_join error: %s\n", strerror(errno));
        closesocket (recvfd);   /* fix: don't leak the socket */
        return -1;
    }

    return recvfd;
}

#else

static int
mcast_join(evutil_socket_t sockfd, const struct sockaddr *grp, socklen_t grplen,
           const char *ifname, u_int ifindex)
{
#if (defined MCAST_JOIN_GROUP) && (!
defined __APPLE__) struct group_req req; if (ifindex > 0) { req.gr_interface = ifindex; } else if (ifname != NULL) { if ( (req.gr_interface = if_nametoindex(ifname)) == 0) { errno = ENXIO; /* i/f name not found */ return(-1); } } else req.gr_interface = 0; if (grplen > sizeof(req.gr_group)) { errno = EINVAL; return -1; } memcpy(&req.gr_group, grp, grplen); return (setsockopt(sockfd, family_to_level(grp->sa_family), MCAST_JOIN_GROUP, &req, sizeof(req))); #else /* end mcast_join1 */ /* include mcast_join2 */ switch (grp->sa_family) { case AF_INET: { struct ip_mreq mreq; struct ifreq ifreq; memcpy(&mreq.imr_multiaddr.s_addr, &((const struct sockaddr_in *) grp)->sin_addr, sizeof(struct in_addr)); if (ifindex > 0) { if (if_indextoname(ifindex, ifreq.ifr_name) == NULL) { errno = ENXIO; /* i/f index not found */ return(-1); } goto doioctl; } else if (ifname != NULL) { strncpy(ifreq.ifr_name, ifname, IFNAMSIZ); doioctl: if (ioctl(sockfd, SIOCGIFADDR, &ifreq) < 0) return(-1); memcpy(&mreq.imr_interface, &((struct sockaddr_in *) &ifreq.ifr_addr)->sin_addr, sizeof(struct in_addr)); } else mreq.imr_interface.s_addr = htonl(INADDR_ANY); return(setsockopt(sockfd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq))); } /* end mcast_join2 */ /* include mcast_join3 */ #ifdef IPV6 #ifndef IPV6_JOIN_GROUP /* APIv0 compatibility */ #define IPV6_JOIN_GROUP IPV6_ADD_MEMBERSHIP #endif case AF_INET6: { struct ipv6_mreq mreq6; memcpy(&mreq6.ipv6mr_multiaddr, &((const struct sockaddr_in6 *) grp)->sin6_addr, sizeof(struct in6_addr)); if (ifindex > 0) { mreq6.ipv6mr_interface = ifindex; } else if (ifname != NULL) { if ( (mreq6.ipv6mr_interface = if_nametoindex(ifname)) == 0) { errno = ENXIO; /* i/f name not found */ return(-1); } } else mreq6.ipv6mr_interface = 0; return(setsockopt(sockfd, IPPROTO_IPV6, IPV6_JOIN_GROUP, &mreq6, sizeof(mreq6))); } #endif default: errno = EAFNOSUPPORT; return(-1); } #endif return -1; } evutil_socket_t create_multicast_sock (struct sockaddr *sasend, socklen_t 
salen) { int ret; const int on = 1; evutil_socket_t recvfd; struct sockaddr *sarecv; if ( (recvfd = socket (sasend->sa_family, SOCK_DGRAM, 0)) < 0) { ccnet_warning ("Create multicast listen socket fails: %s\n", strerror(errno)); return -1; } ret = setsockopt(recvfd, SOL_SOCKET, SO_REUSEADDR, (char *)&on, sizeof(on)); if (ret < 0) ccnet_warning("Failed to setsockopt SO_REUSEADDR\n"); sarecv = malloc(salen); memcpy(sarecv, sasend, salen); if (bind(recvfd, sarecv, salen) < 0) { ccnet_warning ("Bind multicast listen socket fails: %s\n", strerror(errno)); free (sarecv); return -1; } free (sarecv); if (mcast_join(recvfd, sasend, salen, NULL, 0) < 0) { ccnet_warning ("mcast_join error: %s\n", strerror(errno)); return -1; } return recvfd; } #endif int sockfd_to_family(evutil_socket_t sockfd) { struct sockaddr_storage ss; socklen_t len; len = sizeof(ss); if (getsockname(sockfd, (struct sockaddr *) &ss, &len) < 0) return(-1); return(ss.ss_family); } int mcast_set_loop(evutil_socket_t sockfd, int onoff) { #ifndef WIN32 switch (sockfd_to_family(sockfd)) { case AF_INET: { u_char flag; flag = onoff; return(setsockopt(sockfd, IPPROTO_IP, IP_MULTICAST_LOOP, &flag, sizeof(flag))); } #ifdef IPV6 case AF_INET6: { u_int flag; flag = onoff; return(setsockopt(sockfd, IPPROTO_IPV6, IPV6_MULTICAST_LOOP, &flag, sizeof(flag))); } #endif default: errno = EAFNOSUPPORT; return(-1); } #else return -1; #endif /* WIN32 */ } ================================================ FILE: lib/net.h ================================================ #ifndef CCNET_NET_H #define CCNET_NET_H #ifdef WIN32 #include #include #include typedef int socklen_t; #define UNUSED #else #include #include #include #include #include #include #include #include #endif #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) #include #else #include #endif #ifdef WIN32 /* #define ECONNREFUSED WSAECONNREFUSED */ /* #define ECONNRESET WSAECONNRESET */ /* #define EHOSTUNREACH WSAEHOSTUNREACH */ /* #define EINPROGRESS 
WSAEINPROGRESS */
/* #define ENOTCONN WSAENOTCONN */
/* #define EWOULDBLOCK WSAEWOULDBLOCK */
#define sockerrno WSAGetLastError( )
#else
#include #define sockerrno errno
#endif

#ifdef WIN32
/* Winsock lacks these BSD helpers; lib provides replacements. */
extern int inet_aton(const char *string, struct in_addr *addr);
extern const char *inet_ntop(int af, const void *src, char *dst, size_t size);
extern int inet_pton(int af, const char *src, void *dst);
#endif

/* TCP helpers: connect/bind/accept, optionally non-blocking. */
evutil_socket_t ccnet_net_open_tcp (const struct sockaddr *sa, int nonblock);
evutil_socket_t ccnet_net_bind_tcp (int port, int nonblock);
evutil_socket_t ccnet_net_accept (evutil_socket_t b,
                                  struct sockaddr_storage *cliaddr,
                                  socklen_t *len, int nonblock);
int ccnet_net_make_socket_blocking (evutil_socket_t fd);

/* bind to an IPv4 address, if (*port == 0) the port number will be returned */
evutil_socket_t ccnet_net_bind_v4 (const char *ipaddr, int *port);

int ccnet_netSetTOS ( evutil_socket_t s, int tos );

/* Address formatting/parsing helpers. */
char *sock_ntop(const struct sockaddr *sa, socklen_t salen);
uint16_t sock_port (const struct sockaddr *sa);

/* return 1 if addr_str is a valid ipv4 or ipv6 address */
int is_valid_ipaddr (const char *addr_str);

/* return 0 if success, -1 if error */
int sock_pton (const char *addr_str, uint16_t port, struct sockaddr_storage *sa);

evutil_socket_t udp_client (const char *host, const char *serv,
                            struct sockaddr **saptr, socklen_t *lenp);

/* Multicast helpers. */
int mcast_set_loop(evutil_socket_t sockfd, int onoff);
evutil_socket_t create_multicast_sock (struct sockaddr *sasend, socklen_t salen);

#endif

================================================ FILE: lib/repo.vala ================================================
namespace Seafile {

public class Repo : Object {

    // Section 1: Basic information
    // Members in this section should be set for every Repo object

    // _id is for fast access from c code. id is for
    // vala to automatically generate a property. Note,
    // if a Vala property is start with _, it is not
    // translated into a GObject property.
// Due to performance reasons, 'desc', 'magic', 'enc_version', 'root', 'repaired', 'random_key'
// are no longer returned in listing repos API.

// 37 bytes: 36-char repo UUID plus trailing NUL.
public char _id[37];
public string id {
    get { return (string)_id; }
    set { Posix.memcpy(_id, value, 36); _id[36] = '\0'; }
}

public string _name;
public string name {
    get { return _name; }
    set { _name = value; }
}

public string _desc;    // description
public string desc {
    get { return _desc; }
    set { _desc = value; }
}

// data format version
public int version { get; set; }

public int64 last_modify { get; set; }
public int64 size { get; set; }
public int64 file_count { get; set; }
public string last_modifier { get; set; }
public string head_cmmt_id { get; set; }
public string root { get; set; }
public int status { get; set; }
public string repo_type { get; set; }

// To be compatible with obsoleted SharedRepo object
public string repo_id { get; set; }
public string repo_name { get; set; }
public string repo_desc { get; set; }
public int64 last_modified { get; set; }

// Section 2: Encryption related
// Members in this section should be set for every Repo object
public bool encrypted { get; set; }
public string magic { get; set; }
public int enc_version { get; set; }
public string random_key { get; set; }
public string salt { get; set; }
public string pwd_hash { get; set; }
public string pwd_hash_algo { get; set; }
public string pwd_hash_params { get; set; }

// Section 3: Client only information
// Should be set for all client repo objects
public string _worktree;
public string worktree {
    get { return _worktree; }
    set { _worktree = value; }
}

public string _relay_id;
public string relay_id {
    get { return _relay_id; }
    set { _relay_id = value; }
}

public int last_sync_time { get; set; }
public bool auto_sync { get; set; }
public bool worktree_invalid { get; set; }

// Section 4: Server only information
// Should be set for all server repo objects

// virtual repo related
public bool is_virtual { get; set; }
public string origin_repo_id { get; set; }
public string origin_repo_name { get; set; }
public string origin_path { get; set; }
public bool is_original_owner { get; set; }
public string virtual_perm { get; set; }

// Used to access fs objects
public string store_id { get; set; }
public bool is_corrupted { get; set; }
public bool repaired { get; set; }

// Section 5: Share information
// Only set in list_share_repos, get_group_repos and get_inner_pub_repos, etc
public string share_type { get; set; } // personal, group or public
public string permission { get; set; }
public string user { get; set; } // share from or share to
public int group_id { get; set; } // used when shared to group
public string group_name { get; set; } // used when shared to group

// For list_owned_repo
public bool is_shared { get; set; }
}

// Entry of a deleted repo kept in the server-side trash.
public class TrashRepo : Object {
    public string repo_id { get; set; }
    public string repo_name { get; set; }
    public string head_id { get; set; }
    public string owner_id { get; set; }
    public int64 size { get; set; }
    public int64 del_time { get; set; }
    public bool encrypted { get; set; }
}

// Client-side sync status of a repo.
public class SyncInfo : Object {
    public string repo_id { get; set; }
    public string head_commit { get; set; }
    public bool deleted_on_relay { get; set; }
    public bool bad_local_branch { get; set; }
    public bool need_fetch { get; set; }
    public bool need_upload { get; set; }
    public bool need_merge { get; set; }
    // public int last_sync_time { get; set; }
}

public class SyncTask : Object {
    public bool is_sync_lan { get; set; }
    public bool force_upload { get; set; }
    public string dest_id { get; set; }
    public string repo_id { get; set; }
    public string state { get; set; }
    public string error { get; set; }
    public string tx_id { get; set; }
}

public class SessionInfo : Object {
    public string datadir { get; set; }
}

public class CheckoutTask : Object {
    public string repo_id { get; set; }
    public string worktree { get; set; }
    public int total_files { get; set; }
    public int finished_files { get; set; }
}

public class DiffEntry : Object {
    public string status { get; set; }
    public string name { get; set; }
    public string new_name { get; set; }
}

// A file/dir removed by some commit, as shown in the trash listing.
public class DeletedEntry : Object {
    public string commit_id { get; set; }
    public string obj_id { get; set; }
    public string obj_name { get; set; }
    public string basedir { get; set; }
    public int mode { get; set; }
    public int delete_time { get; set; }
    public int64 file_size { get; set; }
    public string scan_stat { get; set; }
}

// Per-client sync token bookkeeping.
public class RepoTokenInfo: Object {
    public string repo_id { get; set; }
    public string repo_name { get; set; }
    public string repo_owner { get; set; }
    public string email { get; set; }
    public string token { get; set; }
    public string peer_id { get; set; }
    public string peer_ip { get; set; }
    public string peer_name { get; set; }
    public int64 sync_time { get; set; }
    public string client_ver { get; set; }
}

public class SharedUser : Object {
    public string repo_id { get; set; }
    public string user { get; set; }
    public string perm { get; set; }
}

public class SharedGroup : Object {
    public string repo_id { get; set; }
    public int group_id { get; set; }
    public string perm { get; set; }
}

// Parameters needed to (re-)derive a repo's encryption material.
public class EncryptionInfo: Object {
    public string repo_id { get; set; }
    public string passwd { get; set; }
    public int enc_version { get; set; }
    public string magic { get; set; }
    public string random_key { get; set; }
    public string salt { get; set; }
    public string pwd_hash { get; set; }
    public string pwd_hash_algo { get; set; }
    public string pwd_hash_params { get; set; }
}

public class UserQuotaUsage: Object {
    public string user { get; set; }
    public int64 usage { get; set; }
}

} // namespace

================================================ FILE: lib/rpc_table.py ================================================
""" Define RPC functions needed to generate """

# Each entry is [ return_type, [argument_types...] ].
func_table = [
    [ "int", [] ],
    [ "int", ["int"] ],
    [ "int", ["int", "int"] ],
    [ "int", ["int", "string"] ],
    [ "int", ["int", "string", "int"] ],
    [ "int", ["int",
"string", "string"] ], [ "int", ["int", "string", "int", "int"] ], [ "int", ["int", "int", "string", "string"] ], [ "int", ["int", "string", "string", "int"] ], [ "int", ["string"] ], [ "int", ["string", "int"] ], [ "int", ["string", "int", "int"] ], [ "int", ["string", "int", "string"] ], [ "int", ["string", "int", "string", "string"] ], [ "int", ["string", "int", "int", "string", "string"] ], [ "int", ["string", "string"] ], [ "int", ["string", "string", "int"] ], [ "int", ["string", "string", "int64"] ], [ "int", ["string", "string", "string"] ], [ "int", ["string", "string", "int", "int"] ], [ "int", ["string", "string", "string", "int"] ], [ "int", ["string", "string", "string", "int", "string"] ], [ "int", ["string", "string", "string", "string"] ], [ "int", ["string", "string", "string", "string", "string"] ], [ "int", ["string", "int", "string", "int", "int"] ], [ "int", ["string", "string", "string", "string", "string", "string"] ], [ "int", ["string", "string", "string", "int", "string", "string"] ], [ "int", ["string", "string", "string", "string", "string", "string", "string"] ], [ "int", ["string", "int64"]], [ "int", ["int", "int64"]], [ "int", ["int", "string", "int64"]], [ "int64", [] ], [ "int64", ["string"] ], [ "int64", ["int"]], [ "int64", ["int", "string"]], [ "int64", ["string", "string"]], [ "int64", ["string", "int", "string"] ], [ "string", [] ], [ "string", ["int"] ], [ "string", ["int", "int"] ], [ "string", ["int", "string"] ], [ "string", ["int", "int", "string"] ], [ "string", ["string"] ], [ "string", ["string", "int"] ], [ "string", ["string", "int", "int"] ], [ "string", ["string", "string"] ], [ "string", ["string", "string", "int"] ], [ "string", ["string", "string", "int", "int"] ], [ "string", ["string", "string", "string"] ], [ "string", ["string", "string", "string", "int"] ], [ "string", ["string", "string", "string", "string"] ], [ "string", ["string", "string", "string", "string", "int"] ], [ "string", ["string", "string",
    # string-, objlist-, object- and json-returning signatures continue below
    "string", "string", "int", "string", "string"] ], [ "string", ["string", "string", "string", "string", "string"] ], [ "string", ["string", "string", "string", "string", "string", "int"] ], [ "string", ["string", "string", "string", "int", "string", "string"] ], [ "string", ["string", "string", "string", "string", "string", "string", "int"] ], [ "string", ["string", "string", "string", "string", "string", "string", "int", "int"] ], [ "string", ["string", "string", "string", "string", "string", "string"] ], [ "string", ["string", "string", "string", "string", "string", "string", "int64"] ], [ "string", ["string", "string", "string", "string", "string", "string", "int64", "int"] ], [ "string", ["string", "string", "string", "string", "string", "string", "string"] ], [ "string", ["string", "string", "string", "string", "string", "string", "string", "int"] ], [ "string", ["string", "string", "string", "string", "string", "string", "string", "int64"] ], [ "string", ["string", "string", "string", "string", "string", "string", "string", "string", "string"] ], [ "string", ["string", "string", "string", "string", "string", "string", "string", "int", "string", "string", "string"] ], [ "string", ["string", "int", "string", "string", "string", "string", "string", "string", "string", "string", "string", "string", "int", "string"] ], [ "string", ["string", "int", "string", "int", "int"] ], [ "string", ["string", "int", "string", "string", "string"] ], [ "objlist", [] ], [ "objlist", ["int"] ], [ "objlist", ["int", "int"] ], [ "objlist", ["int", "string"] ], [ "objlist", ["int", "int", "int"] ], [ "objlist", ["int", "int", "string"] ], [ "objlist", ["int", "int", "string", "int"] ], [ "objlist", ["string"] ], [ "objlist", ["string", "int"] ], [ "objlist", ["string", "int", "int"] ], [ "objlist", ["string", "int", "int", "int"] ], [ "objlist", ["string", "int", "string"] ], [ "objlist", ["string", "string"] ], [ "objlist", ["string", "string", "string"] ], [ "objlist", ["string",
    "string", "int"] ], [ "objlist", ["string", "string", "string", "int"] ], [ "objlist", ["string", "string", "int", "int"] ], [ "objlist", ["string", "int", "int", "string"] ], [ "objlist", ["string", "string", "int", "int", "int"] ], [ "objlist", ["string", "string", "string", "int", "int", "int"] ], [ "objlist", ["int", "string", "string", "int", "int"] ], [ "objlist", ["string", "int", "string", "string", "string"] ], [ "objlist", ["string", "int", "string", "int", "int"] ], [ "objlist", ["string", "int", "string", "string", "int"] ], [ "objlist", ["string", "string", "string", "string", "int", "int"] ], [ "object", [] ], [ "object", ["int"] ], [ "object", ["string"] ], [ "object", ["string", "string"] ], [ "object", ["string", "string", "string"] ], [ "object", ["string", "int", "string"] ], [ "object", ["int", "string", "string"] ], [ "object", ["int", "string", "string", "string", "string"] ], [ "object", ["string", "string", "int", "int"] ], [ "object", ["string", "string", "string", "int"] ], [ "object", ["string", "string", "string", "string", "string", "string", "string", "int", "int"] ], [ "object", ["string", "string", "string", "string", "string", "string", "int", "string", "int", "int"] ], ["json", ["string"]], ]

================================================ FILE: lib/seahub.vala ================================================
namespace Seafile {

// Metadata of a shared link (file or directory).
public class ShareLinkInfo : Object {
    public string repo_id { set; get; }
    public string file_path { set; get; }
    public string parent_dir { set; get; }
    public string share_type { set; get; }
}

}

================================================ FILE: lib/search-result.vala ================================================
// compile this file with `valac --pkg posix repo.vala -C -H repo.h`
namespace Seafile {

// One hit returned by the file-search API.
public class SearchResult: Object {
    public string _path;
    public string path {
        get { return _path; }
        set { _path = value; }
    }
    public int64 size { get; set; }
    public int64 mtime { get; set; }
    public bool
is_dir { set; get; }
}

} // namespace

================================================ FILE: lib/task.vala ================================================
namespace Seafile {

// Progress/state of a sync transfer task.
public class Task : Object {
    // 37 bytes: 36-char transfer id plus trailing NUL.
    public char _tx_id[37];
    public string tx_id {
        get { return (string)_tx_id; }
        set { Posix.memcpy(_tx_id, value, 36); _tx_id[36] = '\0'; }
    }

    public string ttype { get; set; }
    public string repo_id { get; set; }
    public string dest_id { get; set; }
    public string from_branch { get; set; }
    public string to_branch { get; set; }
    public string state { get; set; }
    public string rt_state { get; set; }
    public string error_str { get; set; }
    public int block_total { get; set; }
    public int block_done { get; set; } // the number of blocks sent or received
    public int fs_objects_total { get; set; }
    public int fs_objects_done { get; set; }
    public int rate { get; set; }

    public int64 _rsize; // the size remain
    public int64 rsize{
        get { return _rsize; }
        set { _rsize = value; }
    }

    public int64 _dsize; // the size has done
    public int64 dsize {
        get { return _dsize; }
        set { _dsize = value; }
    }
}

public class CloneTask : Object {
    public string state { get; set; }
    public string error_str { get; set; }
    public string repo_id { get; set; }
    public string peer_id { get; set; }
    public string repo_name { get; set; }
    public string worktree { get; set; }
    public string tx_id { get; set; }
}

} // namespace

================================================ FILE: lib/timer.c ================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) #include #include #include #else #include #endif #include #include "utils.h" #include "timer.h"

/* Periodic timer built on a libevent timer event. */
struct CcnetTimer
{
    struct event event;
    struct timeval tv;          /* firing interval */
    TimerCB func;               /* user callback */
    void *user_data;
    uint8_t inCallback;         /* non-zero while func is executing; lets
                                 * ccnet_timer_free know it must not free
                                 * the timer out from under the callback */
};

/* libevent trampoline: run the user callback, then either reschedule
 * (callback returned non-zero) or destroy the timer. */
static void
timer_callback (evutil_socket_t fd, short event, void *vtimer)
{
    int more;
    struct CcnetTimer *timer = vtimer;
    timer->inCallback = 1;
    more = (*timer->func) (timer->user_data);
    timer->inCallback = 0;

    if (more)
        evtimer_add (&timer->event, &timer->tv);
    else
        ccnet_timer_free (&timer);
}

void
ccnet_timer_free (CcnetTimer **ptimer)
{
    CcnetTimer *timer;

    /* zero out the argument passed in */
    g_return_if_fail (ptimer);
    timer = *ptimer;
    *ptimer = NULL;

    /* destroy the timer directly or via the command queue */
    /* NOTE(review): when called from inside the callback the struct is
     * intentionally left alive (timer_callback still uses it); it is
     * freed by timer_callback's own ccnet_timer_free call afterwards. */
    if (timer && !timer->inCallback)
    {
        event_del (&timer->event);
        g_free (timer);
    }
}

CcnetTimer*
ccnet_timer_new (TimerCB func,
                 void *user_data,
                 uint64_t interval_milliseconds)
{
    CcnetTimer *timer = g_new0 (CcnetTimer, 1);

    timer->tv = timeval_from_msec (interval_milliseconds);
    timer->func = func;
    timer->user_data = user_data;

    evtimer_set (&timer->event, timer_callback, timer);
    evtimer_add (&timer->event, &timer->tv);

    return timer;
}

================================================ FILE: lib/timer.h ================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef CCNET_TIMER_H
#define CCNET_TIMER_H

/* return TRUE to reschedule the timer, return FALSE to cancel the timer */
typedef int (*TimerCB) (void *data);

struct CcnetTimer;
typedef struct CcnetTimer CcnetTimer;

/**
 * Calls timer_func(user_data) after the specified interval.
 * The timer is freed if timer_func returns zero.
 * Otherwise, it's called again after the same interval.
 */
CcnetTimer* ccnet_timer_new (TimerCB func,
                             void *user_data,
                             uint64_t timeout_milliseconds);

/**
 * Frees a timer and sets the timer pointer to NULL.
*/ void ccnet_timer_free (CcnetTimer **timer); #endif ================================================ FILE: lib/utils.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include #include "common.h" #ifdef WIN32 #ifndef _WIN32_WINNT #define _WIN32_WINNT 0x500 #endif #endif #include "utils.h" #ifdef WIN32 #include #include #include #include #include #include #else #include #endif #ifndef WIN32 #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include extern int inet_pton(int af, const char *src, void *dst); struct timeval timeval_from_msec (uint64_t milliseconds) { struct timeval ret; const uint64_t microseconds = milliseconds * 1000; ret.tv_sec = microseconds / 1000000; ret.tv_usec = microseconds % 1000000; return ret; } void rawdata_to_hex (const unsigned char *rawdata, char *hex_str, int n_bytes) { static const char hex[] = "0123456789abcdef"; int i; for (i = 0; i < n_bytes; i++) { unsigned int val = *rawdata++; *hex_str++ = hex[val >> 4]; *hex_str++ = hex[val & 0xf]; } *hex_str = '\0'; } static unsigned hexval(char c) { if (c >= '0' && c <= '9') return c - '0'; if (c >= 'a' && c <= 'f') return c - 'a' + 10; if (c >= 'A' && c <= 'F') return c - 'A' + 10; return ~0; } int hex_to_rawdata (const char *hex_str, unsigned char *rawdata, int n_bytes) { int i; for (i = 0; i < n_bytes; i++) { unsigned int val = (hexval(hex_str[0]) << 4) | hexval(hex_str[1]); if (val & ~0xff) return -1; *rawdata++ = val; hex_str += 2; } return 0; } size_t ccnet_strlcpy (char *dest, const char *src, size_t size) { size_t ret = strlen(src); if (size) { size_t len = (ret >= size) ? 
size - 1 : ret; memcpy(dest, src, len); dest[len] = '\0'; } return ret; } int checkdir (const char *dir) { SeafStat st; #ifdef WIN32 /* remove trailing '\\' */ char *path = g_strdup(dir); char *p = (char *)path + strlen(path) - 1; while (*p == '\\' || *p == '/') *p-- = '\0'; if ((seaf_stat(dir, &st) < 0) || !S_ISDIR(st.st_mode)) { g_free (path); return -1; } g_free (path); return 0; #else if ((seaf_stat(dir, &st) < 0) || !S_ISDIR(st.st_mode)) return -1; return 0; #endif } int checkdir_with_mkdir (const char *dir) { #ifdef WIN32 int ret; char *path = g_strdup(dir); char *p = (char *)path + strlen(path) - 1; while (*p == '\\' || *p == '/') *p-- = '\0'; ret = g_mkdir_with_parents(path, 0755); g_free (path); return ret; #else return g_mkdir_with_parents(dir, 0755); #endif } int objstore_mkdir (const char *base) { int ret; int i, j, len; static const char hex[] = "0123456789abcdef"; char subdir[SEAF_PATH_MAX]; if ( (ret = checkdir_with_mkdir(base)) < 0) return ret; len = strlen(base); memcpy(subdir, base, len); subdir[len] = G_DIR_SEPARATOR; subdir[len+3] = '\0'; for (i = 0; i < 16; i++) { subdir[len+1] = hex[i]; for (j = 0; j < 16; j++) { subdir[len+2] = hex[j]; if ( (ret = checkdir_with_mkdir(subdir)) < 0) return ret; } } return 0; } void objstore_get_path (char *path, const char *base, const char *obj_id) { int len; len = strlen(base); memcpy(path, base, len); path[len] = G_DIR_SEPARATOR; path[len+1] = obj_id[0]; path[len+2] = obj_id[1]; path[len+3] = G_DIR_SEPARATOR; strcpy(path+len+4, obj_id+2); } #ifdef WIN32 /* UNIX epoch expressed in Windows time, the unit is 100 nanoseconds. 
* See http://msdn.microsoft.com/en-us/library/ms724228 */
#define UNIX_EPOCH 116444736000000000ULL

/* Convert a Windows FILETIME (100ns ticks since 1601) to Unix seconds. */
__time64_t
file_time_to_unix_time (FILETIME *ftime)
{
    guint64 win_time, unix_time;
    win_time = (guint64)ftime->dwLowDateTime + (((guint64)ftime->dwHighDateTime)<<32);
    unix_time = (win_time - UNIX_EPOCH)/10000000;
    return (__time64_t)unix_time;
}

/* Fetch mtime/ctime (UTC Unix time) of an open fd via its Win32 handle.
 * Returns 0 on success, -1 on error. */
static int
get_utc_file_time_fd (int fd, __time64_t *mtime, __time64_t *ctime)
{
    HANDLE handle;
    FILETIME write_time, create_time;

    handle = (HANDLE)_get_osfhandle (fd);
    if (handle == INVALID_HANDLE_VALUE) {
        g_warning ("Failed to get handle from fd: %lu.\n", GetLastError());
        return -1;
    }

    if (!GetFileTime (handle, &create_time, NULL, &write_time)) {
        g_warning ("Failed to get file time: %lu.\n", GetLastError());
        return -1;
    }

    *mtime = file_time_to_unix_time (&write_time);
    *ctime = file_time_to_unix_time (&create_time);

    return 0;
}

/* Seconds between 1601-01-01 and 1970-01-01. */
#define EPOCH_DIFF 11644473600ULL

/* Convert Unix seconds to a Windows FILETIME. */
inline static void
unix_time_to_file_time (guint64 unix_time, FILETIME *ftime)
{
    guint64 win_time;
    win_time = (unix_time + EPOCH_DIFF) * 10000000;
    ftime->dwLowDateTime = win_time & 0xFFFFFFFF;
    ftime->dwHighDateTime = (win_time >> 32) & 0xFFFFFFFF;
}

/* Set the last-write time of path; wpath is its wide-char form.
 * Returns 0 on success, -1 on error. */
static int
set_utc_file_time (const char *path, const wchar_t *wpath, guint64 mtime)
{
    HANDLE handle;
    FILETIME write_time;

    handle = CreateFileW (wpath,
                          GENERIC_WRITE,
                          FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                          NULL,
                          OPEN_EXISTING,
                          FILE_FLAG_BACKUP_SEMANTICS,
                          NULL);
    if (handle == INVALID_HANDLE_VALUE) {
        g_warning ("Failed to open %s: %lu.\n", path, GetLastError());
        return -1;
    }

    unix_time_to_file_time (mtime, &write_time);

    if (!SetFileTime (handle, NULL, NULL, &write_time)) {
        g_warning ("Failed to set file time for %s: %lu.\n", path, GetLastError());
        CloseHandle (handle);
        return -1;
    }
    CloseHandle (handle);

    return 0;
}

/* Return the "\\?\"-prefixed wide form of path (handles UNC paths),
 * with forward slashes converted to backslashes. Caller frees. */
wchar_t *
win32_long_path (const char *path)
{
    char *long_path, *p;
    wchar_t *long_path_w;

    if (strncmp(path, "//", 2) == 0)
        long_path = g_strconcat ("\\\\?\\UNC\\", path + 2, NULL);
    else
        long_path = g_strconcat ("\\\\?\\", path, NULL);

    for (p = long_path; *p != 0; ++p)
        if (*p == '/')
            *p = '\\';

    long_path_w = g_utf8_to_utf16 (long_path, -1, NULL, NULL, NULL);

    g_free (long_path);
    return long_path_w;
}

/* Convert a (possible) 8.3 format path to long path */
wchar_t *
win32_83_path_to_long_path (const char *worktree, const wchar_t *path, int path_len)
{
    wchar_t *worktree_w = g_utf8_to_utf16 (worktree, -1, NULL, NULL, NULL);
    int wt_len;
    wchar_t *p;
    wchar_t *fullpath_w = NULL;
    wchar_t *fullpath_long = NULL;
    wchar_t *ret = NULL;
    char *fullpath;

    for (p = worktree_w; *p != L'\0'; ++p)
        if (*p == L'/')
            *p = L'\\';
    wt_len = wcslen(worktree_w);

    /* +6: "\\?\" prefix (4) + separator (1) + NUL (1). */
    fullpath_w = g_new0 (wchar_t, wt_len + path_len + 6);
    wcscpy (fullpath_w, L"\\\\?\\");
    wcscat (fullpath_w, worktree_w);
    wcscat (fullpath_w, L"\\");
    wcsncat (fullpath_w, path, path_len);

    fullpath_long = g_new0 (wchar_t, SEAF_PATH_MAX);

    DWORD n = GetLongPathNameW (fullpath_w, fullpath_long, SEAF_PATH_MAX);
    if (n == 0) {
        /* Failed. */
        fullpath = g_utf16_to_utf8 (fullpath_w, -1, NULL, NULL, NULL);
        g_free (fullpath);
        goto out;
    } else if (n > SEAF_PATH_MAX) {
        /* In this case n is the necessary length for the buf. */
        g_free (fullpath_long);
        fullpath_long = g_new0 (wchar_t, n);

        if (GetLongPathNameW (fullpath_w, fullpath_long, n) != (n - 1)) {
            fullpath = g_utf16_to_utf8 (fullpath_w, -1, NULL, NULL, NULL);
            g_free (fullpath);
            goto out;
        }
    }

    /* Remove "\\?\worktree\" from the beginning. */
    ret = wcsdup (fullpath_long + wt_len + 5);

out:
    g_free (worktree_w);
    g_free (fullpath_w);
    g_free (fullpath_long);

    return ret;
}

/* Map common Win32 error codes onto errno values (0 if unmapped). */
static int
windows_error_to_errno (DWORD error)
{
    switch (error) {
    case ERROR_FILE_NOT_FOUND:
    case ERROR_PATH_NOT_FOUND:
        return ENOENT;
    case ERROR_ALREADY_EXISTS:
        return EEXIST;
    case ERROR_ACCESS_DENIED:
    case ERROR_SHARING_VIOLATION:
        return EACCES;
    case ERROR_DIR_NOT_EMPTY:
        return ENOTEMPTY;
    default:
        return 0;
    }
}

/* stat() replacement; long-path aware on Windows. Returns 0 or -1. */
int
seaf_stat (const char *path, SeafStat *st)
{
#ifdef WIN32
    wchar_t *wpath = win32_long_path (path);
    WIN32_FILE_ATTRIBUTE_DATA attrs;
    int ret = 0;

    if (!GetFileAttributesExW (wpath, GetFileExInfoStandard, &attrs)) {
        ret = -1;
        errno = windows_error_to_errno (GetLastError());
        goto out;
    }

    memset (st, 0, sizeof(SeafStat));

    if (attrs.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)
        st->st_mode = (S_IFDIR | S_IRWXU);
    else
        st->st_mode = (S_IFREG | S_IRUSR | S_IWUSR);

    st->st_atime = file_time_to_unix_time (&attrs.ftLastAccessTime);
    st->st_ctime = file_time_to_unix_time (&attrs.ftCreationTime);
    st->st_mtime = file_time_to_unix_time (&attrs.ftLastWriteTime);
    st->st_size = ((((__int64)attrs.nFileSizeHigh)<<32) + attrs.nFileSizeLow);

out:
    g_free (wpath);
    return ret;
#else
    return stat (path, st);
#endif
}

/* fstat() replacement; on Windows also fills UTC mtime/ctime. */
int
seaf_fstat (int fd, SeafStat *st)
{
#ifdef WIN32
    if (_fstat64 (fd, st) < 0)
        return -1;

    if (get_utc_file_time_fd (fd, &st->st_mtime, &st->st_ctime) < 0)
        return -1;

    return 0;
#else
    return fstat (fd, st);
#endif
}

#ifdef WIN32

/* Fill a SeafStat from directory-enumeration data (FindFirstFileW). */
void
seaf_stat_from_find_data (WIN32_FIND_DATAW *fdata, SeafStat *st)
{
    memset (st, 0, sizeof(SeafStat));

    if (fdata->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)
        st->st_mode = (S_IFDIR | S_IRWXU);
    else
        st->st_mode = (S_IFREG | S_IRUSR | S_IWUSR);

    st->st_atime = file_time_to_unix_time (&fdata->ftLastAccessTime);
    st->st_ctime = file_time_to_unix_time (&fdata->ftCreationTime);
    st->st_mtime = file_time_to_unix_time (&fdata->ftLastWriteTime);
    st->st_size = ((((__int64)fdata->nFileSizeHigh)<<32) +
fdata->nFileSizeLow); } #endif int seaf_set_file_time (const char *path, guint64 mtime) { #ifndef WIN32 struct stat st; struct utimbuf times; if (stat (path, &st) < 0) { g_warning ("Failed to stat %s: %s.\n", path, strerror(errno)); return -1; } times.actime = st.st_atime; times.modtime = (time_t)mtime; return utime (path, ×); #else wchar_t *wpath = win32_long_path (path); int ret = 0; if (set_utc_file_time (path, wpath, mtime) < 0) ret = -1; g_free (wpath); return ret; #endif } int seaf_util_unlink (const char *path) { #ifdef WIN32 wchar_t *wpath = win32_long_path (path); int ret = 0; if (!DeleteFileW (wpath)) { ret = -1; errno = windows_error_to_errno (GetLastError()); } g_free (wpath); return ret; #else return unlink (path); #endif } int seaf_util_rmdir (const char *path) { #ifdef WIN32 wchar_t *wpath = win32_long_path (path); int ret = 0; if (!RemoveDirectoryW (wpath)) { ret = -1; errno = windows_error_to_errno (GetLastError()); } g_free (wpath); return ret; #else return rmdir (path); #endif } int seaf_util_mkdir (const char *path, mode_t mode) { #ifdef WIN32 wchar_t *wpath = win32_long_path (path); int ret = 0; if (!CreateDirectoryW (wpath, NULL)) { ret = -1; errno = windows_error_to_errno (GetLastError()); } g_free (wpath); return ret; #else return mkdir (path, mode); #endif } int seaf_util_open (const char *path, int flags) { #ifdef WIN32 wchar_t *wpath; DWORD access = 0; HANDLE handle; int fd; access |= GENERIC_READ; if (flags & (O_WRONLY | O_RDWR)) access |= GENERIC_WRITE; wpath = win32_long_path (path); handle = CreateFileW (wpath, access, FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL); if (handle == INVALID_HANDLE_VALUE) { errno = windows_error_to_errno (GetLastError()); g_free (wpath); return -1; } fd = _open_osfhandle ((intptr_t)handle, 0); g_free (wpath); return fd; #else return open (path, flags); #endif } int seaf_util_create (const char *path, int flags, mode_t mode) { #ifdef WIN32 wchar_t *wpath; DWORD access 
= 0; HANDLE handle; int fd; access |= GENERIC_READ; if (flags & (O_WRONLY | O_RDWR)) access |= GENERIC_WRITE; wpath = win32_long_path (path); handle = CreateFileW (wpath, access, FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, CREATE_ALWAYS, 0, NULL); if (handle == INVALID_HANDLE_VALUE) { errno = windows_error_to_errno (GetLastError()); g_free (wpath); return -1; } fd = _open_osfhandle ((intptr_t)handle, 0); g_free (wpath); return fd; #else return open (path, flags, mode); #endif } int seaf_util_rename (const char *oldpath, const char *newpath) { #ifdef WIN32 wchar_t *oldpathw = win32_long_path (oldpath); wchar_t *newpathw = win32_long_path (newpath); int ret = 0; if (!MoveFileExW (oldpathw, newpathw, MOVEFILE_REPLACE_EXISTING)) { ret = -1; errno = windows_error_to_errno (GetLastError()); } g_free (oldpathw); g_free (newpathw); return ret; #else return rename (oldpath, newpath); #endif } gboolean seaf_util_exists (const char *path) { #ifdef WIN32 wchar_t *wpath = win32_long_path (path); DWORD attrs; gboolean ret; attrs = GetFileAttributesW (wpath); ret = (attrs != INVALID_FILE_ATTRIBUTES); g_free (wpath); return ret; #else return (access (path, F_OK) == 0); #endif } gint64 seaf_util_lseek (int fd, gint64 offset, int whence) { #ifdef WIN32 return _lseeki64 (fd, offset, whence); #else return lseek (fd, offset, whence); #endif } #ifdef WIN32 int traverse_directory_win32 (wchar_t *path_w, DirentCallback callback, void *user_data) { WIN32_FIND_DATAW fdata; HANDLE handle; wchar_t *pattern; char *path; int path_len_w; DWORD error; gboolean stop; int ret = 0; path = g_utf16_to_utf8 (path_w, -1, NULL, NULL, NULL); path_len_w = wcslen(path_w); pattern = g_new0 (wchar_t, (path_len_w + 3)); wcscpy (pattern, path_w); wcscat (pattern, L"\\*"); handle = FindFirstFileW (pattern, &fdata); if (handle == INVALID_HANDLE_VALUE) { g_warning ("FindFirstFile failed %s: %lu.\n", path, GetLastError()); ret = -1; goto out; } do { if (wcscmp (fdata.cFileName, L".") == 0 || 
wcscmp (fdata.cFileName, L"..") == 0) continue; ++ret; stop = FALSE; if (callback (path_w, &fdata, user_data, &stop) < 0) { ret = -1; FindClose (handle); goto out; } if (stop) { FindClose (handle); goto out; } } while (FindNextFileW (handle, &fdata) != 0); error = GetLastError(); if (error != ERROR_NO_MORE_FILES) { g_warning ("FindNextFile failed %s: %lu.\n", path, error); ret = -1; } FindClose (handle); out: g_free (path); g_free (pattern); return ret; } #endif ssize_t readn (int fd, void *buf, size_t n) { size_t n_left; ssize_t n_read; char *ptr; ptr = buf; n_left = n; while (n_left > 0) { n_read = read(fd, ptr, n_left); if (n_read < 0) { if (errno == EINTR) n_read = 0; else return -1; } else if (n_read == 0) break; n_left -= n_read; ptr += n_read; } return (n - n_left); } ssize_t writen (int fd, const void *buf, size_t n) { size_t n_left; ssize_t n_written; const char *ptr; ptr = buf; n_left = n; while (n_left > 0) { n_written = write(fd, ptr, n_left); if (n_written <= 0) { if (n_written < 0 && errno == EINTR) n_written = 0; else return -1; } n_left -= n_written; ptr += n_written; } return n; } ssize_t recvn (evutil_socket_t fd, void *buf, size_t n) { size_t n_left; ssize_t n_read; char *ptr; ptr = buf; n_left = n; while (n_left > 0) { #ifndef WIN32 if ((n_read = read(fd, ptr, n_left)) < 0) #else if ((n_read = recv(fd, ptr, n_left, 0)) < 0) #endif { if (errno == EINTR) n_read = 0; else return -1; } else if (n_read == 0) break; n_left -= n_read; ptr += n_read; } return (n - n_left); } ssize_t sendn (evutil_socket_t fd, const void *buf, size_t n) { size_t n_left; ssize_t n_written; const char *ptr; ptr = buf; n_left = n; while (n_left > 0) { #ifndef WIN32 if ( (n_written = write(fd, ptr, n_left)) <= 0) #else if ( (n_written = send(fd, ptr, n_left, 0)) <= 0) #endif { if (n_written < 0 && errno == EINTR) n_written = 0; else return -1; } n_left -= n_written; ptr += n_written; } return n; } int copy_fd (int ifd, int ofd) { while (1) { char buffer[8192]; ssize_t len = 
readn (ifd, buffer, sizeof(buffer)); if (!len) break; if (len < 0) { close (ifd); return -1; } if (writen (ofd, buffer, len) < 0) { close (ofd); return -1; } } close(ifd); return 0; } int copy_file (const char *dst, const char *src, int mode) { int fdi, fdo, status; if ((fdi = g_open (src, O_RDONLY | O_BINARY, 0)) < 0) return fdi; fdo = g_open (dst, O_WRONLY | O_CREAT | O_EXCL | O_BINARY, mode); if (fdo < 0 && errno == EEXIST) { close (fdi); return 0; } else if (fdo < 0){ close (fdi); return -1; } status = copy_fd (fdi, fdo); if (close (fdo) != 0) return -1; return status; } char* ccnet_expand_path (const char *src) { int total_len = 0; #ifdef WIN32 char new_path[SEAF_PATH_MAX + 1]; char *p = new_path; const char *q = src; memset(new_path, 0, sizeof(new_path)); if (*src == '~') { const char *home = g_get_home_dir(); total_len += strlen(home); if (total_len > SEAF_PATH_MAX) { return NULL; } memcpy(new_path, home, strlen(home)); p += strlen(new_path); q++; } total_len += strlen(q); if (total_len > SEAF_PATH_MAX) { return NULL; } memcpy(p, q, strlen(q)); /* delete the charactor '\' or '/' at the end of the path * because the function stat faied to deal with directory names * with '\' or '/' in the end */ p = new_path + strlen(new_path) - 1; while(*p == '\\' || *p == '/') *p-- = '\0'; return strdup (new_path); #else const char *next_in, *ntoken; char new_path[SEAF_PATH_MAX + 1]; char *next_out; int len; /* special cases */ if (!src || *src == '\0') return NULL; if (strlen(src) > SEAF_PATH_MAX) return NULL; next_in = src; next_out = new_path; *next_out = '\0'; if (*src == '~') { /* handle src start with '~' or '~' like '~plt' */ struct passwd *pw = NULL; for ( ; *next_in != '/' && *next_in != '\0'; next_in++) ; len = next_in - src; if (len == 1) { pw = getpwuid (geteuid()); } else { /* copy '~' to new_path */ if (len > SEAF_PATH_MAX) { return NULL; } memcpy (new_path, src, len); new_path[len] = '\0'; pw = getpwnam (new_path + 1); } if (pw == NULL) return NULL; len = 
strlen (pw->pw_dir); total_len += len; if (total_len > SEAF_PATH_MAX) { return NULL; } memcpy (new_path, pw->pw_dir, len); next_out = new_path + len; *next_out = '\0'; if (*next_in == '\0') return strdup (new_path); } else if (*src != '/') { getcwd (new_path, SEAF_PATH_MAX); for ( ; *next_out; next_out++) ; /* to '\0' */ } while (*next_in != '\0') { /* move ntoken to the next not '/' char */ for (ntoken = next_in; *ntoken == '/'; ntoken++) ; for (next_in = ntoken; *next_in != '/' && *next_in != '\0'; next_in++) ; len = next_in - ntoken; if (len == 0) { /* the path ends with '/', keep it */ *next_out++ = '/'; *next_out = '\0'; break; } if (len == 2 && ntoken[0] == '.' && ntoken[1] == '.') { /* '..' */ for (; next_out > new_path && *next_out != '/'; next_out--) ; *next_out = '\0'; } else if (ntoken[0] != '.' || len != 1) { /* not '.' */ *next_out++ = '/'; total_len += len; if (total_len > SEAF_PATH_MAX) { return NULL; } memcpy (next_out, ntoken, len); next_out += len; *next_out = '\0'; } } /* the final special case */ if (new_path[0] == '\0') { new_path[0] = '/'; new_path[1] = '\0'; } return strdup (new_path); #endif } int calculate_sha1 (unsigned char *sha1, const char *msg, int len) { SHA_CTX c; if (len < 0) len = strlen(msg); SHA1_Init(&c); SHA1_Update(&c, msg, len); SHA1_Final(sha1, &c); return 0; } uint32_t ccnet_sha1_hash (const void *v) { /* 31 bit hash function */ const unsigned char *p = v; uint32_t h = 0; int i; for (i = 0; i < 20; i++) h = (h << 5) - h + p[i]; return h; } int ccnet_sha1_equal (const void *v1, const void *v2) { const unsigned char *p1 = v1; const unsigned char *p2 = v2; int i; for (i = 0; i < 20; i++) if (p1[i] != p2[i]) return 0; return 1; } #ifndef WIN32 char* gen_uuid () { char *uuid_str = g_malloc (37); uuid_t uuid; uuid_generate (uuid); uuid_unparse_lower (uuid, uuid_str); return uuid_str; } void gen_uuid_inplace (char *buf) { uuid_t uuid; uuid_generate (uuid); uuid_unparse_lower (uuid, buf); } gboolean is_uuid_valid (const char 
*uuid_str) { uuid_t uuid; if (!uuid_str) return FALSE; if (uuid_parse (uuid_str, uuid) < 0) return FALSE; return TRUE; } #else char* gen_uuid () { char *uuid_str = g_malloc (37); unsigned char *str = NULL; UUID uuid; UuidCreate(&uuid); UuidToString(&uuid, &str); memcpy(uuid_str, str, 37); RpcStringFree(&str); return uuid_str; } void gen_uuid_inplace (char *buf) { unsigned char *str = NULL; UUID uuid; UuidCreate(&uuid); UuidToString(&uuid, &str); memcpy(buf, str, 37); RpcStringFree(&str); } gboolean is_uuid_valid (const char *uuid_str) { if (!uuid_str) return FALSE; UUID uuid; if (UuidFromString((unsigned char *)uuid_str, &uuid) != RPC_S_OK) return FALSE; return TRUE; } #endif gboolean is_object_id_valid (const char *obj_id) { if (!obj_id) return FALSE; int len = strlen(obj_id); int i; char c; if (len != 40) return FALSE; for (i = 0; i < len; ++i) { c = obj_id[i]; if ((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f')) continue; return FALSE; } return TRUE; } char* strjoin_n (const char *seperator, int argc, char **argv) { GString *buf; int i; char *str; if (argc == 0) return NULL; buf = g_string_new (argv[0]); for (i = 1; i < argc; ++i) { g_string_append (buf, seperator); g_string_append (buf, argv[i]); } str = buf->str; g_string_free (buf, FALSE); return str; } gboolean is_ipaddr_valid (const char *ip) { unsigned char buf[sizeof(struct in6_addr)]; if (evutil_inet_pton(AF_INET, ip, buf) == 1) return TRUE; if (evutil_inet_pton(AF_INET6, ip, buf) == 1) return TRUE; return FALSE; } void parse_key_value_pairs (char *string, KeyValueFunc func, void *data) { char *line = string, *next, *space; char *key, *value; while (*line) { /* handle empty line */ if (*line == '\n') { ++line; continue; } for (next = line; *next != '\n' && *next; ++next) ; *next = '\0'; for (space = line; space < next && *space != ' '; ++space) ; if (*space != ' ') { g_warning ("Bad key value format: %s\n", line); return; } *space = '\0'; key = line; value = space + 1; func (data, key, value); line = 
next + 1; } } void parse_key_value_pairs2 (char *string, KeyValueFunc2 func, void *data) { char *line = string, *next, *space; char *key, *value; while (*line) { /* handle empty line */ if (*line == '\n') { ++line; continue; } for (next = line; *next != '\n' && *next; ++next) ; *next = '\0'; for (space = line; space < next && *space != ' '; ++space) ; if (*space != ' ') { g_warning ("Bad key value format: %s\n", line); return; } *space = '\0'; key = line; value = space + 1; if (func(data, key, value) == FALSE) break; line = next + 1; } } /** * string_list_is_exists: * @str_list: * @string: a C string or %NULL * * Check whether @string is in @str_list. * * returns: %TRUE if @string is in str_list, %FALSE otherwise */ gboolean string_list_is_exists (GList *str_list, const char *string) { GList *ptr; for (ptr = str_list; ptr; ptr = ptr->next) { if (g_strcmp0(string, ptr->data) == 0) return TRUE; } return FALSE; } /** * string_list_append: * @str_list: * @string: a C string (can't be %NULL * * Append @string to @str_list if it is in the list. 
* * returns: the new start of the list */ GList* string_list_append (GList *str_list, const char *string) { g_return_val_if_fail (string != NULL, str_list); if (string_list_is_exists(str_list, string)) return str_list; str_list = g_list_append (str_list, g_strdup(string)); return str_list; } GList * string_list_append_sorted (GList *str_list, const char *string) { g_return_val_if_fail (string != NULL, str_list); if (string_list_is_exists(str_list, string)) return str_list; str_list = g_list_insert_sorted_with_data (str_list, g_strdup(string), (GCompareDataFunc)g_strcmp0, NULL); return str_list; } GList * string_list_remove (GList *str_list, const char *string) { g_return_val_if_fail (string != NULL, str_list); GList *ptr; for (ptr = str_list; ptr; ptr = ptr->next) { if (strcmp((char *)ptr->data, string) == 0) { g_free (ptr->data); return g_list_delete_link (str_list, ptr); } } return str_list; } void string_list_free (GList *str_list) { GList *ptr = str_list; while (ptr) { g_free (ptr->data); ptr = ptr->next; } g_list_free (str_list); } void string_list_join (GList *str_list, GString *str, const char *seperator) { GList *ptr; if (!str_list) return; ptr = str_list; g_string_append (str, ptr->data); for (ptr = ptr->next; ptr; ptr = ptr->next) { g_string_append (str, seperator); g_string_append (str, (char *)ptr->data); } } GList * string_list_parse (const char *list_in_str, const char *seperator) { if (!list_in_str) return NULL; GList *list = NULL; char **array = g_strsplit (list_in_str, seperator, 0); char **ptr; for (ptr = array; *ptr; ptr++) { list = g_list_prepend (list, g_strdup(*ptr)); } list = g_list_reverse (list); g_strfreev (array); return list; } GList * string_list_parse_sorted (const char *list_in_str, const char *seperator) { GList *list = string_list_parse (list_in_str, seperator); return g_list_sort (list, (GCompareFunc)g_strcmp0); } gboolean string_list_sorted_is_equal (GList *list1, GList *list2) { GList *ptr1 = list1, *ptr2 = list2; while (ptr1 && 
ptr2) { if (g_strcmp0(ptr1->data, ptr2->data) != 0) break; ptr1 = ptr1->next; ptr2 = ptr2->next; } if (!ptr1 && !ptr2) return TRUE; return FALSE; } char ** ncopy_string_array (char **orig, int n) { char **ret = g_malloc (sizeof(char *) * n); int i = 0; for (; i < n; i++) ret[i] = g_strdup(orig[i]); return ret; } void nfree_string_array (char **array, int n) { int i = 0; for (; i < n; i++) g_free (array[i]); g_free (array); } gint64 get_current_time() { return g_get_real_time(); } #ifdef WIN32 static SOCKET pg_serv_sock = INVALID_SOCKET; static struct sockaddr_in pg_serv_addr; /* pgpipe() should only be called in the main loop, * since it accesses the static global socket. */ int pgpipe (ccnet_pipe_t handles[2]) { int len = sizeof( pg_serv_addr ); handles[0] = handles[1] = INVALID_SOCKET; if (pg_serv_sock == INVALID_SOCKET) { if ((pg_serv_sock = socket(AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET) { g_warning("pgpipe failed to create socket: %d\n", WSAGetLastError()); return -1; } memset(&pg_serv_addr, 0, sizeof(pg_serv_addr)); pg_serv_addr.sin_family = AF_INET; pg_serv_addr.sin_port = htons(0); pg_serv_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK); if (bind(pg_serv_sock, (SOCKADDR *)&pg_serv_addr, len) == SOCKET_ERROR) { g_warning("pgpipe failed to bind: %d\n", WSAGetLastError()); closesocket(pg_serv_sock); pg_serv_sock = INVALID_SOCKET; return -1; } if (listen(pg_serv_sock, SOMAXCONN) == SOCKET_ERROR) { g_warning("pgpipe failed to listen: %d\n", WSAGetLastError()); closesocket(pg_serv_sock); pg_serv_sock = INVALID_SOCKET; return -1; } struct sockaddr_in tmp_addr; int tmp_len = sizeof(tmp_addr); if (getsockname(pg_serv_sock, (SOCKADDR *)&tmp_addr, &tmp_len) == SOCKET_ERROR) { g_warning("pgpipe failed to getsockname: %d\n", WSAGetLastError()); closesocket(pg_serv_sock); pg_serv_sock = INVALID_SOCKET; return -1; } pg_serv_addr.sin_port = tmp_addr.sin_port; } if ((handles[1] = socket(PF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET) { g_warning("pgpipe failed to create 
socket 2: %d\n", WSAGetLastError()); closesocket(pg_serv_sock); pg_serv_sock = INVALID_SOCKET; return -1; } if (connect(handles[1], (SOCKADDR *)&pg_serv_addr, len) == SOCKET_ERROR) { g_warning("pgpipe failed to connect socket: %d\n", WSAGetLastError()); closesocket(handles[1]); handles[1] = INVALID_SOCKET; closesocket(pg_serv_sock); pg_serv_sock = INVALID_SOCKET; return -1; } struct sockaddr_in client_addr; int client_len = sizeof(client_addr); if ((handles[0] = accept(pg_serv_sock, (SOCKADDR *)&client_addr, &client_len)) == INVALID_SOCKET) { g_warning("pgpipe failed to accept socket: %d\n", WSAGetLastError()); closesocket(handles[1]); handles[1] = INVALID_SOCKET; closesocket(pg_serv_sock); pg_serv_sock = INVALID_SOCKET; return -1; } return 0; } #endif /* The EVP_EncryptXXX and EVP_DecryptXXX series of functions have a weird choice of returned value. */ #define ENC_SUCCESS 1 #define ENC_FAILURE 0 #define DEC_SUCCESS 1 #define DEC_FAILURE 0 #include #include /* Block size, in bytes. For AES it can only be 16 bytes. */ #define BLK_SIZE 16 #define ENCRYPT_BLK_SIZE BLK_SIZE int ccnet_encrypt (char **data_out, int *out_len, const char *data_in, const int in_len, const char *code, const int code_len) { *data_out = NULL; *out_len = -1; /* check validation */ if ( data_in == NULL || in_len <= 0 || code == NULL || code_len <= 0) { g_warning ("Invalid params.\n"); return -1; } EVP_CIPHER_CTX *ctx; int ret, key_len; unsigned char key[16], iv[16]; int blks; /* Generate the derived key. We use AES 128 bits key, Electroic-Code-Book cipher mode, and SHA1 as the message digest when generating the key. IV is not used in ecb mode, actually. */ key_len = EVP_BytesToKey (EVP_aes_128_ecb(), /* cipher mode */ EVP_sha1(), /* message digest */ NULL, /* salt */ (unsigned char*)code, /* passwd */ code_len, 3, /* iteration times */ key, /* the derived key */ iv); /* IV, initial vector */ /* The key should be 16 bytes long for our 128 bit key. 
*/ if (key_len != 16) { g_warning ("failed to init EVP_CIPHER_CTX.\n"); return -1; } /* Prepare CTX for encryption. */ ctx = EVP_CIPHER_CTX_new (); ret = EVP_EncryptInit_ex (ctx, EVP_aes_128_ecb(), /* cipher mode */ NULL, /* engine, NULL for default */ key, /* derived key */ iv); /* initial vector */ if (ret == ENC_FAILURE){ EVP_CIPHER_CTX_free (ctx); return -1; } /* Allocating output buffer. */ /* For EVP symmetric encryption, padding is always used __even if__ data size is a multiple of block size, in which case the padding length is the block size. so we have the following: */ blks = (in_len / BLK_SIZE) + 1; *data_out = (char *)g_malloc (blks * BLK_SIZE); if (*data_out == NULL) { g_warning ("failed to allocate the output buffer.\n"); goto enc_error; } int update_len, final_len; /* Do the encryption. */ ret = EVP_EncryptUpdate (ctx, (unsigned char*)*data_out, &update_len, (unsigned char*)data_in, in_len); if (ret == ENC_FAILURE) goto enc_error; /* Finish the possible partial block. */ ret = EVP_EncryptFinal_ex (ctx, (unsigned char*)*data_out + update_len, &final_len); *out_len = update_len + final_len; /* out_len should be equal to the allocated buffer size. */ if (ret == ENC_FAILURE || *out_len != (blks * BLK_SIZE)) goto enc_error; EVP_CIPHER_CTX_free (ctx); return 0; enc_error: EVP_CIPHER_CTX_free (ctx); *out_len = -1; if (*data_out != NULL) g_free (*data_out); *data_out = NULL; return -1; } int ccnet_decrypt (char **data_out, int *out_len, const char *data_in, const int in_len, const char *code, const int code_len) { *data_out = NULL; *out_len = -1; /* Check validation. Because padding is always used, in_len must * be a multiple of BLK_SIZE */ if ( data_in == NULL || in_len <= 0 || in_len % BLK_SIZE != 0 || code == NULL || code_len <= 0) { g_warning ("Invalid param(s).\n"); return -1; } EVP_CIPHER_CTX *ctx; int ret, key_len; unsigned char key[16], iv[16]; /* Generate the derived key. 
We use AES 128 bits key, Electroic-Code-Book cipher mode, and SHA1 as the message digest when generating the key. IV is not used in ecb mode, actually. */ key_len = EVP_BytesToKey (EVP_aes_128_ecb(), /* cipher mode */ EVP_sha1(), /* message digest */ NULL, /* salt */ (unsigned char*)code, /* passwd */ code_len, 3, /* iteration times */ key, /* the derived key */ iv); /* IV, initial vector */ /* The key should be 16 bytes long for our 128 bit key. */ if (key_len != 16) { g_warning ("failed to init EVP_CIPHER_CTX.\n"); return -1; } /* Prepare CTX for decryption. */ ctx = EVP_CIPHER_CTX_new (); ret = EVP_DecryptInit_ex (ctx, EVP_aes_128_ecb(), /* cipher mode */ NULL, /* engine, NULL for default */ key, /* derived key */ iv); /* initial vector */ if (ret == DEC_FAILURE) return -1; /* Allocating output buffer. */ *data_out = (char *)g_malloc (in_len); if (*data_out == NULL) { g_warning ("failed to allocate the output buffer.\n"); goto dec_error; } int update_len, final_len; /* Do the decryption. */ ret = EVP_DecryptUpdate (ctx, (unsigned char*)*data_out, &update_len, (unsigned char*)data_in, in_len); if (ret == DEC_FAILURE) goto dec_error; /* Finish the possible partial block. */ ret = EVP_DecryptFinal_ex (ctx, (unsigned char*)*data_out + update_len, &final_len); *out_len = update_len + final_len; /* out_len should be smaller than in_len. 
*/ if (ret == DEC_FAILURE || *out_len > in_len) goto dec_error; EVP_CIPHER_CTX_free (ctx); return 0; dec_error: EVP_CIPHER_CTX_free (ctx); *out_len = -1; if (*data_out != NULL) g_free (*data_out); *data_out = NULL; return -1; } /* convert locale specific input to utf8 encoded string */ char *ccnet_locale_to_utf8 (const gchar *src) { if (!src) return NULL; gsize bytes_read = 0; gsize bytes_written = 0; GError *error = NULL; gchar *dst = NULL; dst = g_locale_to_utf8 (src, /* locale specific string */ strlen(src), /* len of src */ &bytes_read, /* length processed */ &bytes_written, /* output length */ &error); if (error) { return NULL; } return dst; } /* convert utf8 input to locale specific string */ char *ccnet_locale_from_utf8 (const gchar *src) { if (!src) return NULL; gsize bytes_read = 0; gsize bytes_written = 0; GError *error = NULL; gchar *dst = NULL; dst = g_locale_from_utf8 (src, /* locale specific string */ strlen(src), /* len of src */ &bytes_read, /* length processed */ &bytes_written, /* output length */ &error); if (error) { return NULL; } return dst; } #ifdef WIN32 static HANDLE get_process_handle (const char *process_name_in) { char name[256]; if (strstr(process_name_in, ".exe")) { snprintf (name, sizeof(name), "%s", process_name_in); } else { snprintf (name, sizeof(name), "%s.exe", process_name_in); } DWORD aProcesses[1024], cbNeeded, cProcesses; if (!EnumProcesses(aProcesses, sizeof(aProcesses), &cbNeeded)) return NULL; /* Calculate how many process identifiers were returned. 
*/ cProcesses = cbNeeded / sizeof(DWORD); HANDLE hProcess; HMODULE hMod; char process_name[SEAF_PATH_MAX]; unsigned int i; for (i = 0; i < cProcesses; i++) { if(aProcesses[i] == 0) continue; hProcess = OpenProcess (PROCESS_ALL_ACCESS, FALSE, aProcesses[i]); if (!hProcess) continue; if (EnumProcessModules(hProcess, &hMod, sizeof(hMod), &cbNeeded)) { GetModuleBaseName(hProcess, hMod, process_name, sizeof(process_name)/sizeof(char)); } if (strcasecmp(process_name, name) == 0) return hProcess; else { CloseHandle(hProcess); } } /* Not found */ return NULL; } int count_process (const char *process_name_in) { char name[SEAF_PATH_MAX]; char process_name[SEAF_PATH_MAX]; DWORD aProcesses[1024], cbNeeded, cProcesses; HANDLE hProcess; HMODULE hMods[1024]; int count = 0; int i, j; if (strstr(process_name_in, ".exe")) { snprintf (name, sizeof(name), "%s", process_name_in); } else { snprintf (name, sizeof(name), "%s.exe", process_name_in); } if (!EnumProcesses(aProcesses, sizeof(aProcesses), &cbNeeded)) { return 0; } /* Calculate how many process identifiers were returned. 
*/ cProcesses = cbNeeded / sizeof(DWORD); for (i = 0; i < cProcesses; i++) { if(aProcesses[i] == 0) continue; hProcess = OpenProcess (PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, FALSE, aProcesses[i]); if (!hProcess) { continue; } if (EnumProcessModules(hProcess, hMods, sizeof(hMods), &cbNeeded)) { for (j = 0; j < cbNeeded / sizeof(HMODULE); j++) { if (GetModuleBaseName(hProcess, hMods[j], process_name, sizeof(process_name))) { if (strcasecmp(process_name, name) == 0) count++; } } } CloseHandle(hProcess); } return count; } gboolean process_is_running (const char *process_name) { HANDLE proc_handle = get_process_handle(process_name); if (proc_handle) { CloseHandle(proc_handle); return TRUE; } else { return FALSE; } } int win32_kill_process (const char *process_name) { HANDLE proc_handle = get_process_handle(process_name); if (proc_handle) { TerminateProcess(proc_handle, 0); CloseHandle(proc_handle); return 0; } else { return -1; } } int win32_spawn_process (char *cmdline_in, char *working_directory_in) { if (!cmdline_in) return -1; wchar_t *cmdline_w = NULL; wchar_t *working_directory_w = NULL; cmdline_w = wchar_from_utf8 (cmdline_in); if (!cmdline_in) { g_warning ("failed to convert cmdline_in"); return -1; } if (working_directory_in) { working_directory_w = wchar_from_utf8 (working_directory_in); if (!working_directory_w) { g_warning ("failed to convert working_directory_in"); return -1; } } STARTUPINFOW si; PROCESS_INFORMATION pi; unsigned flags; BOOL success; /* we want to execute seafile without crreating a console window */ flags = CREATE_NO_WINDOW; memset(&si, 0, sizeof(si)); si.cb = sizeof(si); si.dwFlags = STARTF_USESTDHANDLES | STARTF_FORCEOFFFEEDBACK; si.hStdInput = (HANDLE) _get_osfhandle(0); si.hStdOutput = (HANDLE) _get_osfhandle(1); si.hStdError = (HANDLE) _get_osfhandle(2); memset(&pi, 0, sizeof(pi)); success = CreateProcessW (NULL, cmdline_w, NULL, NULL, TRUE, flags, NULL, working_directory_w, &si, &pi); free (cmdline_w); if (working_directory_w) 
free (working_directory_w); if (!success) { g_warning ("failed to fork_process: GLE=%lu\n", GetLastError()); return -1; } /* close the handle of thread so that the process object can be freed by * system */ CloseHandle(pi.hThread); CloseHandle(pi.hProcess); return 0; } char * wchar_to_utf8 (const wchar_t *wch) { if (wch == NULL) { return NULL; } char *utf8 = NULL; int bufsize, len; bufsize = WideCharToMultiByte (CP_UTF8, /* multibyte code page */ 0, /* flags */ wch, /* src */ -1, /* src len, -1 for all includes \0 */ utf8, /* dst */ 0, /* dst buf len */ NULL, /* default char */ NULL); /* BOOL flag indicates default char is used */ if (bufsize <= 0) { g_warning ("failed to convert a string from wchar to utf8 0"); return NULL; } utf8 = g_malloc(bufsize); len = WideCharToMultiByte (CP_UTF8, /* multibyte code page */ 0, /* flags */ wch, /* src */ -1, /* src len, -1 for all includes \0 */ utf8, /* dst */ bufsize, /* dst buf len */ NULL, /* default char */ NULL); /* BOOL flag indicates default char is used */ if (len != bufsize) { g_free (utf8); g_warning ("failed to convert a string from wchar to utf8"); return NULL; } return utf8; } wchar_t * wchar_from_utf8 (const char *utf8) { if (utf8 == NULL) { return NULL; } wchar_t *wch = NULL; int bufsize, len; bufsize = MultiByteToWideChar (CP_UTF8, /* multibyte code page */ 0, /* flags */ utf8, /* src */ -1, /* src len, -1 for all includes \0 */ wch, /* dst */ 0); /* dst buf len */ if (bufsize <= 0) { g_warning ("failed to convert a string from wchar to utf8 0"); return NULL; } wch = g_malloc (bufsize * sizeof(wchar_t)); len = MultiByteToWideChar (CP_UTF8, /* multibyte code page */ 0, /* flags */ utf8, /* src */ -1, /* src len, -1 for all includes \0 */ wch, /* dst */ bufsize); /* dst buf len */ if (len != bufsize) { g_free (wch); g_warning ("failed to convert a string from utf8 to wchar"); return NULL; } return wch; } #endif /* ifdef WIN32 */ #ifdef __linux__ /* read the link of /proc/123/exe and compare with `process_name' 
*/ static int find_process_in_dirent(struct dirent *dir, const char *process_name) { char path[512]; /* fisrst construct a path like /proc/123/exe */ if (sprintf (path, "/proc/%s/exe", dir->d_name) < 0) { return -1; } char buf[SEAF_PATH_MAX]; /* get the full path of exe */ ssize_t l = readlink(path, buf, SEAF_PATH_MAX); if (l < 0) return -1; buf[l] = '\0'; /* get the base name of exe */ char *base = g_path_get_basename(buf); int ret = strcmp(base, process_name); g_free(base); if (ret == 0) return atoi(dir->d_name); else return -1; } /* read the /proc fs to determine whether some process is running */ gboolean process_is_running (const char *process_name) { DIR *proc_dir = opendir("/proc"); if (!proc_dir) { fprintf (stderr, "failed to open /proc/ dir\n"); return FALSE; } struct dirent *subdir = NULL; while ((subdir = readdir(proc_dir))) { char first = subdir->d_name[0]; /* /proc/[1-9][0-9]* */ if (first > '9' || first < '1') continue; int pid = find_process_in_dirent(subdir, process_name); if (pid > 0) { closedir(proc_dir); return TRUE; } } closedir(proc_dir); return FALSE; } int count_process(const char *process_name) { int count = 0; DIR *proc_dir = opendir("/proc"); if (!proc_dir) { g_warning ("failed to open /proc/ :%s\n", strerror(errno)); return FALSE; } struct dirent *subdir = NULL; while ((subdir = readdir(proc_dir))) { char first = subdir->d_name[0]; /* /proc/[1-9][0-9]* */ if (first > '9' || first < '1') continue; if (find_process_in_dirent(subdir, process_name) > 0) { count++; } } closedir (proc_dir); return count; } #endif #ifdef __APPLE__ gboolean process_is_running (const char *process_name) { //TODO return FALSE; } #endif char* ccnet_object_type_from_id (const char *object_id) { char *ptr; if ( !(ptr = strchr(object_id, '/')) ) return NULL; return g_strndup(object_id, ptr - object_id); } #ifdef WIN32 /** * In Win32 we need to use _stat64 for files larger than 2GB. _stat64 needs * the `path' argument in gbk encoding. 
*/ #define STAT_STRUCT struct __stat64 #define STAT_FUNC win_stat64_utf8 static inline int win_stat64_utf8 (char *path_utf8, STAT_STRUCT *sb) { wchar_t *path_w = wchar_from_utf8 (path_utf8); int result = _wstat64 (path_w, sb); free (path_w); return result; } #else #define STAT_STRUCT struct stat #define STAT_FUNC stat #endif static gint64 calc_recursively (const char *path, GError **calc_error) { gint64 sum = 0; GError *error = NULL; GDir *folder = g_dir_open(path, 0, &error); if (!folder) { g_set_error (calc_error, CCNET_DOMAIN, 0, "g_open() dir %s failed:%s\n", path, error->message); return -1; } const char *name = NULL; while ((name = g_dir_read_name(folder)) != NULL) { STAT_STRUCT sb; char *full_path= g_build_filename (path, name, NULL); if (STAT_FUNC(full_path, &sb) < 0) { g_set_error (calc_error, CCNET_DOMAIN, 0, "failed to stat on %s: %s\n", full_path, strerror(errno)); g_free(full_path); g_dir_close(folder); return -1; } if (S_ISDIR(sb.st_mode)) { gint64 size = calc_recursively(full_path, calc_error); if (size < 0) { g_free (full_path); g_dir_close (folder); return -1; } sum += size; g_free(full_path); } else if (S_ISREG(sb.st_mode)) { sum += sb.st_size; g_free(full_path); } } g_dir_close (folder); return sum; } gint64 ccnet_calc_directory_size (const char *path, GError **error) { return calc_recursively (path, error); } #ifdef WIN32 /* * strtok_r code directly from glibc.git /string/strtok_r.c since windows * doesn't have it. */ char * strtok_r(char *s, const char *delim, char **save_ptr) { char *token; if(s == NULL) s = *save_ptr; /* Scan leading delimiters. */ s += strspn(s, delim); if(*s == '\0') { *save_ptr = s; return NULL; } /* Find the end of the token. */ token = s; s = strpbrk(token, delim); if(s == NULL) { /* This token finishes the string. */ *save_ptr = strchr(token, '\0'); } else { /* Terminate the token and make *SAVE_PTR point past it. */ *s = '\0'; *save_ptr = s + 1; } return token; } #endif /* JSON related utils. 
For compatibility with json-glib. */ const char * json_object_get_string_member (json_t *object, const char *key) { json_t *string = json_object_get (object, key); if (!string) return NULL; return json_string_value (string); } gboolean json_object_has_member (json_t *object, const char *key) { return (json_object_get (object, key) != NULL); } gint64 json_object_get_int_member (json_t *object, const char *key) { json_t *integer = json_object_get (object, key); return json_integer_value (integer); } void json_object_set_string_member (json_t *object, const char *key, const char *value) { json_object_set_new (object, key, json_string (value)); } void json_object_set_int_member (json_t *object, const char *key, gint64 value) { json_object_set_new (object, key, json_integer (value)); } void clean_utf8_data (char *data, int len) { const char *s, *e; char *p; gboolean is_valid; s = data; p = data; while ((s - data) != len) { is_valid = g_utf8_validate (s, len - (s - data), &e); if (is_valid) break; if (s != e) p += (e - s); *p = '?'; ++p; s = e + 1; } } char * normalize_utf8_path (const char *path) { if (!g_utf8_validate (path, -1, NULL)) return NULL; return g_utf8_normalize (path, -1, G_NORMALIZE_NFC); } /* zlib related wrapper functions. 
 */

#define ZLIB_BUF_SIZE 16384

/* Deflate `inlen` bytes from `input` with zlib default compression.
 * On success returns 0 and stores a malloc'd buffer in *output and its
 * length in *outlen (caller g_free()s it).  Returns -1 on empty input or
 * deflateInit failure. */
int
seaf_compress (guint8 *input, int inlen, guint8 **output, int *outlen)
{
    int ret;
    unsigned have;
    z_stream strm;
    guint8 out[ZLIB_BUF_SIZE];
    GByteArray *barray;

    if (inlen == 0)
        return -1;

    /* allocate deflate state */
    strm.zalloc = Z_NULL;
    strm.zfree = Z_NULL;
    strm.opaque = Z_NULL;
    ret = deflateInit(&strm, Z_DEFAULT_COMPRESSION);
    if (ret != Z_OK) {
        g_warning ("deflateInit failed.\n");
        return -1;
    }

    strm.avail_in = inlen;
    strm.next_in = input;

    barray = g_byte_array_new ();

    /* All input is supplied at once with Z_FINISH, so deflate() cannot
     * return a hard error here; loop until the stream is finished. */
    do {
        strm.avail_out = ZLIB_BUF_SIZE;
        strm.next_out = out;
        ret = deflate(&strm, Z_FINISH); /* no bad return value */
        have = ZLIB_BUF_SIZE - strm.avail_out;
        g_byte_array_append (barray, out, have);
    } while (ret != Z_STREAM_END);

    /* g_byte_array_free(…, FALSE) releases the wrapper but hands the data
     * buffer to the caller. */
    *outlen = barray->len;
    *output = g_byte_array_free (barray, FALSE);

    /* clean up and return */
    (void)deflateEnd(&strm);
    return 0;
}

/* Inflate `inlen` bytes from `input`.  On success returns 0 and stores a
 * malloc'd buffer in *output and its length in *outlen (caller g_free()s
 * it).  Returns -1 on empty/corrupt input or inflateInit failure.
 * NOTE(review): inflate() error check is `ret < 0`, so a positive
 * Z_NEED_DICT return would not break the loop — presumably never produced
 * by seaf_compress output; confirm before feeding foreign streams. */
int
seaf_decompress (guint8 *input, int inlen, guint8 **output, int *outlen)
{
    int ret;
    unsigned have;
    z_stream strm;
    unsigned char out[ZLIB_BUF_SIZE];
    GByteArray *barray;

    if (inlen == 0) {
        g_warning ("Empty input for zlib, invalid.\n");
        return -1;
    }

    /* allocate inflate state */
    strm.zalloc = Z_NULL;
    strm.zfree = Z_NULL;
    strm.opaque = Z_NULL;
    strm.avail_in = 0;
    strm.next_in = Z_NULL;
    ret = inflateInit(&strm);
    if (ret != Z_OK) {
        g_warning ("inflateInit failed.\n");
        return -1;
    }

    strm.avail_in = inlen;
    strm.next_in = input;

    barray = g_byte_array_new ();

    do {
        strm.avail_out = ZLIB_BUF_SIZE;
        strm.next_out = out;
        ret = inflate(&strm, Z_NO_FLUSH);
        if (ret < 0) {
            g_warning ("Failed to inflate.\n");
            goto out;
        }
        have = ZLIB_BUF_SIZE - strm.avail_out;
        g_byte_array_append (barray, out, have);
    } while (ret != Z_STREAM_END);

out:
    /* clean up and return */
    (void)inflateEnd(&strm);
    if (ret == Z_STREAM_END) {
        *outlen = barray->len;
        *output = g_byte_array_free (barray, FALSE);
        return 0;
    } else {
        g_byte_array_free (barray, TRUE);
        return -1;
    }
}

/* Canonicalize a directory path: ensure exactly one leading '/' and strip
 * all trailing '/' (except for the root "/").  Returns a new string the
 * caller frees. */
char*
format_dir_path (const char *path)
{
    int path_len = strlen (path);
    char *rpath;

    if (path[0] != '/') {
        rpath = g_strconcat ("/", path, NULL);
        path_len++;
    } else {
        rpath = g_strdup (path);
    }
    while (path_len > 1 && rpath[path_len-1] == '/') {
        rpath[path_len-1] = '\0';
        path_len--;
    }

    return rpath;
}

/* TRUE for NULL or "". */
gboolean
is_empty_string (const char *str)
{
    return !str || strcmp (str, "") == 0;
}

/* A permission string is valid iff it is exactly "r" or "rw". */
gboolean
is_permission_valid (const char *perm)
{
    if (is_empty_string (perm)) {
        return FALSE;
    }

    return strcmp (perm, "r") == 0 || strcmp (perm, "rw") == 0;
}

/* Like g_key_file_get_string() but maps missing/empty values to NULL and
 * strips trailing whitespace.  Caller frees the result. */
char *
seaf_key_file_get_string (GKeyFile *key_file,
                          const char *group,
                          const char *key,
                          GError **error)
{
    char *v;

    v = g_key_file_get_string (key_file, group, key, error);
    if (!v || v[0] == '\0') {
        g_free (v);
        return NULL;
    }

    return g_strchomp(v);
}

/* Same contract as seaf_key_file_get_string() but never reports an error;
 * missing or empty keys simply yield NULL. */
gchar*
ccnet_key_file_get_string (GKeyFile *keyf,
                           const char *category,
                           const char *key)
{
    gchar *v;

    if (!g_key_file_has_key (keyf, category, key, NULL))
        return NULL;

    v = g_key_file_get_string (keyf, category, key, NULL);
    if (v != NULL && v[0] == '\0') {
        g_free(v);
        return NULL;
    }

    return g_strchomp(v);
}



================================================
FILE: lib/utils.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

#ifndef CCNET_UTILS_H
#define CCNET_UTILS_H

/* NOTE(review): the targets of the #include directives below were lost
 * during text extraction (the <...> parts are missing) — restore them
 * from the upstream lib/utils.h before building. */
#ifdef WIN32
#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x500
#endif
#include
#endif

#include
#include
#include
#include
#include
#include
#include
#include
#include

#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
#include
#else
#include
#endif

#ifdef __linux__
#include
#endif

#ifdef __OpenBSD__
#include
#endif

#ifdef WIN32
#include
#include

#ifndef WEXITSTATUS
#define WEXITSTATUS(status) (((status) & 0xff00) >> 8)
#endif

/* Borrowed from libevent */
#define ccnet_pipe_t intptr_t

int pgpipe (ccnet_pipe_t handles[2]);
/* Should only be called in main loop.
*/ #define ccnet_pipe(a) pgpipe((a)) #define piperead(a,b,c) recv((a),(b),(c),0) #define pipewrite(a,b,c) send((a),(b),(c),0) #define pipeclose(a) closesocket((a)) #define SeafStat struct __stat64 #else #define ccnet_pipe_t int #define ccnet_pipe(a) pipe((a)) #define piperead(a,b,c) read((a),(b),(c)) #define pipewrite(a,b,c) write((a),(b),(c)) #define pipeclose(a) close((a)) #define SeafStat struct stat #endif #define pipereadn(a,b,c) recvn((a),(b),(c)) #define pipewriten(a,b,c) sendn((a),(b),(c)) int seaf_stat (const char *path, SeafStat *st); int seaf_fstat (int fd, SeafStat *st); #ifdef WIN32 void seaf_stat_from_find_data (WIN32_FIND_DATAW *fdata, SeafStat *st); #endif int seaf_set_file_time (const char *path, guint64 mtime); #ifdef WIN32 wchar_t * win32_long_path (const char *path); /* Convert a (possible) 8.3 format path to long path */ wchar_t * win32_83_path_to_long_path (const char *worktree, const wchar_t *path, int path_len); __time64_t file_time_to_unix_time (FILETIME *ftime); #endif int seaf_util_unlink (const char *path); int seaf_util_rmdir (const char *path); int seaf_util_mkdir (const char *path, mode_t mode); int seaf_util_open (const char *path, int flags); int seaf_util_create (const char *path, int flags, mode_t mode); int seaf_util_rename (const char *oldpath, const char *newpath); gboolean seaf_util_exists (const char *path); gint64 seaf_util_lseek (int fd, gint64 offset, int whence); #ifdef WIN32 typedef int (*DirentCallback) (wchar_t *parent, WIN32_FIND_DATAW *fdata, void *user_data, gboolean *stop); int traverse_directory_win32 (wchar_t *path_w, DirentCallback callback, void *user_data); #endif #ifndef O_BINARY #define O_BINARY 0 #endif /* for debug */ #ifndef ccnet_warning #define ccnet_warning(fmt, ...) g_warning("%s(%d): " fmt, __FILE__, __LINE__, ##__VA_ARGS__) #endif #ifndef ccnet_error #define ccnet_error(fmt, ...) g_error("%s(%d): " fmt, __FILE__, __LINE__, ##__VA_ARGS__) #endif #ifndef ccnet_message #define ccnet_message(fmt, ...) 
g_message("%s(%d): " fmt, __FILE__, __LINE__, ##__VA_ARGS__) #endif #define CCNET_DOMAIN g_quark_from_string("ccnet") #define CCNET_ERR_INTERNAL 500 struct timeval timeval_from_msec (uint64_t milliseconds); size_t ccnet_strlcpy (char *dst, const char *src, size_t size); void rawdata_to_hex (const unsigned char *rawdata, char *hex_str, int n_bytes); int hex_to_rawdata (const char *hex_str, unsigned char *rawdata, int n_bytes); #define sha1_to_hex(sha1, hex) rawdata_to_hex((sha1), (hex), 20) #define hex_to_sha1(hex, sha1) hex_to_rawdata((hex), (sha1), 20) /* If msg is NULL-terminated, set len to -1 */ int calculate_sha1 (unsigned char *sha1, const char *msg, int len); int ccnet_sha1_equal (const void *v1, const void *v2); unsigned int ccnet_sha1_hash (const void *v); char* gen_uuid (); void gen_uuid_inplace (char *buf); gboolean is_uuid_valid (const char *uuid_str); gboolean is_object_id_valid (const char *obj_id); /* dir operations */ int checkdir (const char *dir); int checkdir_with_mkdir (const char *path); char* ccnet_expand_path (const char *src); /** * Make directory with 256 sub-directories from '00' to 'ff'. * `base` and subdir will be created if they are not existing. */ int objstore_mkdir (const char *base); void objstore_get_path (char *path, const char *base, const char *obj_id); /* Read "n" bytes from a descriptor. */ ssize_t readn(int fd, void *vptr, size_t n); ssize_t writen(int fd, const void *vptr, size_t n); /* Read "n" bytes from a socket. 
*/ ssize_t recvn(evutil_socket_t fd, void *vptr, size_t n); ssize_t sendn(evutil_socket_t fd, const void *vptr, size_t n); int copy_fd (int ifd, int ofd); int copy_file (const char *dst, const char *src, int mode); /* string utilities */ char** strsplit_by_char (char *string, int *length, char c); char * strjoin_n (const char *seperator, int argc, char **argv); int is_ipaddr_valid (const char *ip); typedef void (*KeyValueFunc) (void *data, const char *key, char *value); void parse_key_value_pairs (char *string, KeyValueFunc func, void *data); typedef gboolean (*KeyValueFunc2) (void *data, const char *key, const char *value); void parse_key_value_pairs2 (char *string, KeyValueFunc2 func, void *data); GList *string_list_append (GList *str_list, const char *string); GList *string_list_append_sorted (GList *str_list, const char *string); GList *string_list_remove (GList *str_list, const char *string); void string_list_free (GList *str_list); gboolean string_list_is_exists (GList *str_list, const char *string); void string_list_join (GList *str_list, GString *strbuf, const char *seperator); GList *string_list_parse (const char *list_in_str, const char *seperator); GList *string_list_parse_sorted (const char *list_in_str, const char *seperator); gboolean string_list_sorted_is_equal (GList *list1, GList *list2); char** ncopy_string_array (char **orig, int n); void nfree_string_array (char **array, int n); /* 64bit time */ gint64 get_current_time(); int ccnet_encrypt (char **data_out, int *out_len, const char *data_in, const int in_len, const char *code, const int code_len); int ccnet_decrypt (char **data_out, int *out_len, const char *data_in, const int in_len, const char *code, const int code_len); /* * Utility functions for converting data to/from network byte order. 
*/ static inline uint64_t bswap64 (uint64_t val) { uint64_t ret; uint8_t *ptr = (uint8_t *)&ret; ptr[0]=((val)>>56)&0xFF; ptr[1]=((val)>>48)&0xFF; ptr[2]=((val)>>40)&0xFF; ptr[3]=((val)>>32)&0xFF; ptr[4]=((val)>>24)&0xFF; ptr[5]=((val)>>16)&0xFF; ptr[6]=((val)>>8)&0xFF; ptr[7]=(val)&0xFF; return ret; } static inline uint64_t hton64(uint64_t val) { #if __BYTE_ORDER == __LITTLE_ENDIAN || defined WIN32 || defined __APPLE__ return bswap64 (val); #else return val; #endif } static inline uint64_t ntoh64(uint64_t val) { #if __BYTE_ORDER == __LITTLE_ENDIAN || defined WIN32 || defined __APPLE__ return bswap64 (val); #else return val; #endif } static inline void put64bit(uint8_t **ptr,uint64_t val) { uint64_t val_n = hton64 (val); *((uint64_t *)(*ptr)) = val_n; (*ptr)+=8; } static inline void put32bit(uint8_t **ptr,uint32_t val) { uint32_t val_n = htonl (val); *((uint32_t *)(*ptr)) = val_n; (*ptr)+=4; } static inline void put16bit(uint8_t **ptr,uint16_t val) { uint16_t val_n = htons (val); *((uint16_t *)(*ptr)) = val_n; (*ptr)+=2; } static inline uint64_t get64bit(const uint8_t **ptr) { uint64_t val_h = ntoh64 (*((uint64_t *)(*ptr))); (*ptr)+=8; return val_h; } static inline uint32_t get32bit(const uint8_t **ptr) { uint32_t val_h = ntohl (*((uint32_t *)(*ptr))); (*ptr)+=4; return val_h; } static inline uint16_t get16bit(const uint8_t **ptr) { uint16_t val_h = ntohs (*((uint16_t *)(*ptr))); (*ptr)+=2; return val_h; } /* Convert between local encoding and utf8. Returns the converted * string if success, otherwise return NULL */ char *ccnet_locale_from_utf8 (const gchar *src); char *ccnet_locale_to_utf8 (const gchar *src); /* Detect whether a process with the given name is running right now. 
*/ gboolean process_is_running(const char *name); /* count how much instance of a program is running */ int count_process (const char *process_name_in); #ifdef WIN32 int win32_kill_process (const char *process_name_in); int win32_spawn_process (char *cmd, char *wd); char *wchar_to_utf8 (const wchar_t *src); wchar_t *wchar_from_utf8 (const char *src); #endif char* ccnet_object_type_from_id (const char *object_id); gint64 ccnet_calc_directory_size (const char *path, GError **error); #ifdef WIN32 char * strtok_r(char *s, const char *delim, char **save_ptr); #endif #include const char * json_object_get_string_member (json_t *object, const char *key); gboolean json_object_has_member (json_t *object, const char *key); gint64 json_object_get_int_member (json_t *object, const char *key); void json_object_set_string_member (json_t *object, const char *key, const char *value); void json_object_set_int_member (json_t *object, const char *key, gint64 value); /* Replace invalid UTF-8 bytes with '?' */ void clean_utf8_data (char *data, int len); char * normalize_utf8_path (const char *path); /* zlib related functions. 
*/ int seaf_compress (guint8 *input, int inlen, guint8 **output, int *outlen); int seaf_decompress (guint8 *input, int inlen, guint8 **output, int *outlen); char* format_dir_path (const char *path); gboolean is_empty_string (const char *str); gboolean is_permission_valid (const char *perm); char * seaf_key_file_get_string (GKeyFile *key_file, const char *group, const char *key, GError **error); gchar* ccnet_key_file_get_string (GKeyFile *keyf, const char *category, const char *key); #endif ================================================ FILE: lib/webaccess.vala ================================================ namespace Seafile { public class WebAccess : Object { public string repo_id { set; get; } public string obj_id { set; get; } public string op { set; get; } public string username { set; get; } } } ================================================ FILE: m4/ax_lib_sqlite3.m4 ================================================ # =========================================================================== # http://www.nongnu.org/autoconf-archive/ax_lib_sqlite3.html # =========================================================================== # # SYNOPSIS # # AX_LIB_SQLITE3([MINIMUM-VERSION]) # # DESCRIPTION # # Test for the SQLite 3 library of a particular version (or newer) # # This macro takes only one optional argument, required version of SQLite # 3 library. If required version is not passed, 3.0.0 is used in the test # of existance of SQLite 3. # # If no intallation prefix to the installed SQLite library is given the # macro searches under /usr, /usr/local, and /opt. # # This macro calls: # # AC_SUBST(SQLITE3_CFLAGS) # AC_SUBST(SQLITE3_LDFLAGS) # AC_SUBST(SQLITE3_VERSION) # # And sets: # # HAVE_SQLITE3 # # LICENSE # # Copyright (c) 2008 Mateusz Loskot # # Copying and distribution of this file, with or without modification, are # permitted in any medium without royalty provided the copyright notice # and this notice are preserved. 
AC_DEFUN([AX_LIB_SQLITE3],
[
    AC_ARG_WITH([sqlite3],
        AC_HELP_STRING(
            [--with-sqlite3=@<:@ARG@:>@],
            [use SQLite 3 library @<:@default=yes@:>@, optionally specify the prefix for sqlite3 library]
        ),
        [
        if test "$withval" = "no"; then
            WANT_SQLITE3="no"
        elif test "$withval" = "yes"; then
            WANT_SQLITE3="yes"
            ac_sqlite3_path=""
        else
            WANT_SQLITE3="yes"
            ac_sqlite3_path="$withval"
        fi
        ],
        [WANT_SQLITE3="yes"]
    )

    SQLITE3_CFLAGS=""
    SQLITE3_LDFLAGS=""
    SQLITE3_VERSION=""

    if test "x$WANT_SQLITE3" = "xyes"; then

        ac_sqlite3_header="sqlite3.h"

        sqlite3_version_req=ifelse([$1], [], [3.0.0], [$1])
        sqlite3_version_req_shorten=`expr $sqlite3_version_req : '\([[0-9]]*\.[[0-9]]*\)'`
        sqlite3_version_req_major=`expr $sqlite3_version_req : '\([[0-9]]*\)'`
        sqlite3_version_req_minor=`expr $sqlite3_version_req : '[[0-9]]*\.\([[0-9]]*\)'`
        sqlite3_version_req_micro=`expr $sqlite3_version_req : '[[0-9]]*\.[[0-9]]*\.\([[0-9]]*\)'`
        if test "x$sqlite3_version_req_micro" = "x" ; then
            sqlite3_version_req_micro="0"
        fi

        sqlite3_version_req_number=`expr $sqlite3_version_req_major \* 1000000 \
                                   \+ $sqlite3_version_req_minor \* 1000 \
                                   \+ $sqlite3_version_req_micro`

        AC_MSG_CHECKING([for SQLite3 library >= $sqlite3_version_req])

        if test "$ac_sqlite3_path" != ""; then
            ac_sqlite3_ldflags="-L$ac_sqlite3_path/lib"
            ac_sqlite3_cppflags="-I$ac_sqlite3_path/include"
        else
            for ac_sqlite3_path_tmp in /usr /usr/local /opt ; do
                if test -f "$ac_sqlite3_path_tmp/include/$ac_sqlite3_header" \
                    && test -r "$ac_sqlite3_path_tmp/include/$ac_sqlite3_header"; then
                    ac_sqlite3_path=$ac_sqlite3_path_tmp
                    ac_sqlite3_cppflags="-I$ac_sqlite3_path_tmp/include"
                    ac_sqlite3_ldflags="-L$ac_sqlite3_path_tmp/lib"
                    break;
                fi
            done
        fi

        ac_sqlite3_ldflags="$ac_sqlite3_ldflags -lsqlite3"

        saved_CPPFLAGS="$CPPFLAGS"
        CPPFLAGS="$CPPFLAGS $ac_sqlite3_cppflags"

        AC_COMPILE_IFELSE(
            [
            AC_LANG_PROGRAM([[@%:@include ]],
                [[
#if (SQLITE_VERSION_NUMBER >= $sqlite3_version_req_number)
// Everything is okay
#else
#  error SQLite version is too old
#endif
                ]]
            )
            ],
            [
            AC_MSG_RESULT([yes])
            success="yes"
            ],
            [
            AC_MSG_RESULT([not found])
            dnl fix: this branch used to assign the misspelled variable
            dnl `succees`, leaving `success` unset on failure.
            success="no"
            ]
        )

        CPPFLAGS="$saved_CPPFLAGS"

        if test "$success" = "yes"; then

            SQLITE3_CFLAGS="$ac_sqlite3_cppflags"
            SQLITE3_LDFLAGS="$ac_sqlite3_ldflags"

            ac_sqlite3_header_path="$ac_sqlite3_path/include/$ac_sqlite3_header"

            dnl Retrieve SQLite release version
            if test "x$ac_sqlite3_header_path" != "x"; then
                ac_sqlite3_version=`cat $ac_sqlite3_header_path \
                    | grep '#define.*SQLITE_VERSION.*\"' | sed -e 's/.* "//' \
                        | sed -e 's/"//'`
                dnl fix: quote the expansion — the previous unquoted
                dnl `test $ac_sqlite3_version != ""` was a shell error
                dnl when the variable came back empty.
                if test "x$ac_sqlite3_version" != "x"; then
                    SQLITE3_VERSION=$ac_sqlite3_version
                else
                    AC_MSG_WARN([Can not find SQLITE_VERSION macro in sqlite3.h header to retrieve SQLite version!])
                fi
            fi

            AC_SUBST(SQLITE3_CFLAGS)
            AC_SUBST(SQLITE3_LDFLAGS)
            AC_SUBST(SQLITE3_VERSION)
            AC_DEFINE([HAVE_SQLITE3], [], [Have the SQLITE3 library])
        fi
    fi
])



================================================
FILE: m4/glib-gettext.m4
================================================
# Copyright (C) 1995-2002 Free Software Foundation, Inc.
# Copyright (C) 2001-2003,2004 Red Hat, Inc.
#
# This file is free software, distributed under the terms of the GNU
# General Public License.  As a special exception to the GNU General
# Public License, this file may be distributed as part of a program
# that contains a configuration script generated by Autoconf, under
# the same distribution terms as the rest of that program.
#
# This file can be copied and used freely without restrictions.  It can
# be used in projects which are not available under the GNU Public License
# but which still want to provide support for the GNU gettext functionality.
#
# Macro to add for using GNU gettext.
# Ulrich Drepper , 1995, 1996
#
# Modified to never use included libintl.
# Owen Taylor , 12/15/1998 # # Major rework to remove unused code # Owen Taylor , 12/11/2002 # # Added better handling of ALL_LINGUAS from GNU gettext version # written by Bruno Haible, Owen Taylor 5/30/3002 # # Modified to require ngettext # Matthias Clasen 08/06/2004 # # We need this here as well, since someone might use autoconf-2.5x # to configure GLib then an older version to configure a package # using AM_GLIB_GNU_GETTEXT AC_PREREQ(2.53) dnl dnl We go to great lengths to make sure that aclocal won't dnl try to pull in the installed version of these macros dnl when running aclocal in the glib directory. dnl m4_copy([AC_DEFUN],[glib_DEFUN]) m4_copy([AC_REQUIRE],[glib_REQUIRE]) dnl dnl At the end, if we're not within glib, we'll define the public dnl definitions in terms of our private definitions. dnl # GLIB_LC_MESSAGES #-------------------- glib_DEFUN([GLIB_LC_MESSAGES], [AC_CHECK_HEADERS([locale.h]) if test $ac_cv_header_locale_h = yes; then AC_CACHE_CHECK([for LC_MESSAGES], am_cv_val_LC_MESSAGES, [AC_TRY_LINK([#include ], [return LC_MESSAGES], am_cv_val_LC_MESSAGES=yes, am_cv_val_LC_MESSAGES=no)]) if test $am_cv_val_LC_MESSAGES = yes; then AC_DEFINE(HAVE_LC_MESSAGES, 1, [Define if your file defines LC_MESSAGES.]) fi fi]) # GLIB_PATH_PROG_WITH_TEST #---------------------------- dnl GLIB_PATH_PROG_WITH_TEST(VARIABLE, PROG-TO-CHECK-FOR, dnl TEST-PERFORMED-ON-FOUND_PROGRAM [, VALUE-IF-NOT-FOUND [, PATH]]) glib_DEFUN([GLIB_PATH_PROG_WITH_TEST], [# Extract the first word of "$2", so it can be a program name with args. set dummy $2; ac_word=[$]2 AC_MSG_CHECKING([for $ac_word]) AC_CACHE_VAL(ac_cv_path_$1, [case "[$]$1" in /*) ac_cv_path_$1="[$]$1" # Let the user override the test with a path. ;; *) IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}:" for ac_dir in ifelse([$5], , $PATH, [$5]); do test -z "$ac_dir" && ac_dir=. 
if test -f $ac_dir/$ac_word; then if [$3]; then ac_cv_path_$1="$ac_dir/$ac_word" break fi fi done IFS="$ac_save_ifs" dnl If no 4th arg is given, leave the cache variable unset, dnl so AC_PATH_PROGS will keep looking. ifelse([$4], , , [ test -z "[$]ac_cv_path_$1" && ac_cv_path_$1="$4" ])dnl ;; esac])dnl $1="$ac_cv_path_$1" if test ifelse([$4], , [-n "[$]$1"], ["[$]$1" != "$4"]); then AC_MSG_RESULT([$]$1) else AC_MSG_RESULT(no) fi AC_SUBST($1)dnl ]) # GLIB_WITH_NLS #----------------- glib_DEFUN([GLIB_WITH_NLS], dnl NLS is obligatory [USE_NLS=yes AC_SUBST(USE_NLS) gt_cv_have_gettext=no CATOBJEXT=NONE XGETTEXT=: INTLLIBS= AC_CHECK_HEADER(libintl.h, [gt_cv_func_dgettext_libintl="no" libintl_extra_libs="" # # First check in libc # AC_CACHE_CHECK([for ngettext in libc], gt_cv_func_ngettext_libc, [AC_TRY_LINK([ #include ], [return !ngettext ("","", 1)], gt_cv_func_ngettext_libc=yes, gt_cv_func_ngettext_libc=no) ]) if test "$gt_cv_func_ngettext_libc" = "yes" ; then AC_CACHE_CHECK([for dgettext in libc], gt_cv_func_dgettext_libc, [AC_TRY_LINK([ #include ], [return !dgettext ("","")], gt_cv_func_dgettext_libc=yes, gt_cv_func_dgettext_libc=no) ]) fi if test "$gt_cv_func_ngettext_libc" = "yes" ; then AC_CHECK_FUNCS(bind_textdomain_codeset) fi # # If we don't have everything we want, check in libintl # if test "$gt_cv_func_dgettext_libc" != "yes" \ || test "$gt_cv_func_ngettext_libc" != "yes" \ || test "$ac_cv_func_bind_textdomain_codeset" != "yes" ; then AC_CHECK_LIB(intl, bindtextdomain, [AC_CHECK_LIB(intl, ngettext, [AC_CHECK_LIB(intl, dgettext, gt_cv_func_dgettext_libintl=yes)])]) if test "$gt_cv_func_dgettext_libintl" != "yes" ; then AC_MSG_CHECKING([if -liconv is needed to use gettext]) AC_MSG_RESULT([]) AC_CHECK_LIB(intl, ngettext, [AC_CHECK_LIB(intl, dcgettext, [gt_cv_func_dgettext_libintl=yes libintl_extra_libs=-liconv], :,-liconv)], :,-liconv) fi # # If we found libintl, then check in it for bind_textdomain_codeset(); # we'll prefer libc if neither have 
bind_textdomain_codeset(), # and both have dgettext and ngettext # if test "$gt_cv_func_dgettext_libintl" = "yes" ; then glib_save_LIBS="$LIBS" LIBS="$LIBS -lintl $libintl_extra_libs" unset ac_cv_func_bind_textdomain_codeset AC_CHECK_FUNCS(bind_textdomain_codeset) LIBS="$glib_save_LIBS" if test "$ac_cv_func_bind_textdomain_codeset" = "yes" ; then gt_cv_func_dgettext_libc=no else if test "$gt_cv_func_dgettext_libc" = "yes" \ && test "$gt_cv_func_ngettext_libc" = "yes"; then gt_cv_func_dgettext_libintl=no fi fi fi fi if test "$gt_cv_func_dgettext_libc" = "yes" \ || test "$gt_cv_func_dgettext_libintl" = "yes"; then gt_cv_have_gettext=yes fi if test "$gt_cv_func_dgettext_libintl" = "yes"; then INTLLIBS="-lintl $libintl_extra_libs" fi if test "$gt_cv_have_gettext" = "yes"; then AC_DEFINE(HAVE_GETTEXT,1, [Define if the GNU gettext() function is already present or preinstalled.]) GLIB_PATH_PROG_WITH_TEST(MSGFMT, msgfmt, [test -z "`$ac_dir/$ac_word -h 2>&1 | grep 'dv '`"], no)dnl if test "$MSGFMT" != "no"; then glib_save_LIBS="$LIBS" LIBS="$LIBS $INTLLIBS" AC_CHECK_FUNCS(dcgettext) AC_PATH_PROG(GMSGFMT, gmsgfmt, $MSGFMT) GLIB_PATH_PROG_WITH_TEST(XGETTEXT, xgettext, [test -z "`$ac_dir/$ac_word -h 2>&1 | grep '(HELP)'`"], :) AC_TRY_LINK(, [extern int _nl_msg_cat_cntr; return _nl_msg_cat_cntr], [CATOBJEXT=.gmo DATADIRNAME=share], [case $host in *-*-solaris*) dnl On Solaris, if bind_textdomain_codeset is in libc, dnl GNU format message catalog is always supported, dnl since both are added to the libc all together. dnl Hence, we'd like to go with DATADIRNAME=share and dnl and CATOBJEXT=.gmo in this case. 
AC_CHECK_FUNC(bind_textdomain_codeset, [CATOBJEXT=.gmo DATADIRNAME=share], [CATOBJEXT=.mo DATADIRNAME=lib]) ;; *) CATOBJEXT=.mo DATADIRNAME=lib ;; esac]) LIBS="$glib_save_LIBS" INSTOBJEXT=.mo else gt_cv_have_gettext=no fi fi ]) if test "$gt_cv_have_gettext" = "yes" ; then AC_DEFINE(ENABLE_NLS, 1, [always defined to indicate that i18n is enabled]) fi dnl Test whether we really found GNU xgettext. if test "$XGETTEXT" != ":"; then dnl If it is not GNU xgettext we define it as : so that the dnl Makefiles still can work. if $XGETTEXT --omit-header /dev/null 2> /dev/null; then : ; else AC_MSG_RESULT( [found xgettext program is not GNU xgettext; ignore it]) XGETTEXT=":" fi fi # We need to process the po/ directory. POSUB=po AC_OUTPUT_COMMANDS( [case "$CONFIG_FILES" in *po/Makefile.in*) sed -e "/POTFILES =/r po/POTFILES" po/Makefile.in > po/Makefile esac]) dnl These rules are solely for the distribution goal. While doing this dnl we only have to keep exactly one list of the available catalogs dnl in configure.in. for lang in $ALL_LINGUAS; do GMOFILES="$GMOFILES $lang.gmo" POFILES="$POFILES $lang.po" done dnl Make all variables we use known to autoconf. AC_SUBST(CATALOGS) AC_SUBST(CATOBJEXT) AC_SUBST(DATADIRNAME) AC_SUBST(GMOFILES) AC_SUBST(INSTOBJEXT) AC_SUBST(INTLLIBS) AC_SUBST(PO_IN_DATADIR_TRUE) AC_SUBST(PO_IN_DATADIR_FALSE) AC_SUBST(POFILES) AC_SUBST(POSUB) ]) # AM_GLIB_GNU_GETTEXT # ------------------- # Do checks necessary for use of gettext. If a suitable implementation # of gettext is found in either in libintl or in the C library, # it will set INTLLIBS to the libraries needed for use of gettext # and AC_DEFINE() HAVE_GETTEXT and ENABLE_NLS. (The shell variable # gt_cv_have_gettext will be set to "yes".) It will also call AC_SUBST() # on various variables needed by the Makefile.in.in installed by # glib-gettextize. 
dnl glib_DEFUN([GLIB_GNU_GETTEXT], [AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([AC_HEADER_STDC])dnl GLIB_LC_MESSAGES GLIB_WITH_NLS if test "$gt_cv_have_gettext" = "yes"; then if test "x$ALL_LINGUAS" = "x"; then LINGUAS= else AC_MSG_CHECKING(for catalogs to be installed) NEW_LINGUAS= for presentlang in $ALL_LINGUAS; do useit=no if test "%UNSET%" != "${LINGUAS-%UNSET%}"; then desiredlanguages="$LINGUAS" else desiredlanguages="$ALL_LINGUAS" fi for desiredlang in $desiredlanguages; do # Use the presentlang catalog if desiredlang is # a. equal to presentlang, or # b. a variant of presentlang (because in this case, # presentlang can be used as a fallback for messages # which are not translated in the desiredlang catalog). case "$desiredlang" in "$presentlang"*) useit=yes;; esac done if test $useit = yes; then NEW_LINGUAS="$NEW_LINGUAS $presentlang" fi done LINGUAS=$NEW_LINGUAS AC_MSG_RESULT($LINGUAS) fi dnl Construct list of names of catalog files to be constructed. if test -n "$LINGUAS"; then for lang in $LINGUAS; do CATALOGS="$CATALOGS $lang$CATOBJEXT"; done fi fi dnl If the AC_CONFIG_AUX_DIR macro for autoconf is used we possibly dnl find the mkinstalldirs script in another subdir but ($top_srcdir). dnl Try to locate is. MKINSTALLDIRS= if test -n "$ac_aux_dir"; then MKINSTALLDIRS="$ac_aux_dir/mkinstalldirs" fi if test -z "$MKINSTALLDIRS"; then MKINSTALLDIRS="\$(top_srcdir)/mkinstalldirs" fi AC_SUBST(MKINSTALLDIRS) dnl Generate list of files to be processed by xgettext which will dnl be included in po/Makefile. 
test -d po || mkdir po if test "x$srcdir" != "x."; then if test "x`echo $srcdir | sed 's@/.*@@'`" = "x"; then posrcprefix="$srcdir/" else posrcprefix="../$srcdir/" fi else posrcprefix="../" fi rm -f po/POTFILES sed -e "/^#/d" -e "/^\$/d" -e "s,.*, $posrcprefix& \\\\," -e "\$s/\(.*\) \\\\/\1/" \ < $srcdir/po/POTFILES.in > po/POTFILES ]) # AM_GLIB_DEFINE_LOCALEDIR(VARIABLE) # ------------------------------- # Define VARIABLE to the location where catalog files will # be installed by po/Makefile. glib_DEFUN([GLIB_DEFINE_LOCALEDIR], [glib_REQUIRE([GLIB_GNU_GETTEXT])dnl glib_save_prefix="$prefix" glib_save_exec_prefix="$exec_prefix" test "x$prefix" = xNONE && prefix=$ac_default_prefix test "x$exec_prefix" = xNONE && exec_prefix=$prefix if test "x$CATOBJEXT" = "x.mo" ; then localedir=`eval echo "${libdir}/locale"` else localedir=`eval echo "${datadir}/locale"` fi prefix="$glib_save_prefix" exec_prefix="$glib_save_exec_prefix" AC_DEFINE_UNQUOTED($1, "$localedir", [Define the location where the catalogs will be installed]) ]) dnl dnl Now the definitions that aclocal will find dnl ifdef(glib_configure_in,[],[ AC_DEFUN([AM_GLIB_GNU_GETTEXT],[GLIB_GNU_GETTEXT($@)]) AC_DEFUN([AM_GLIB_DEFINE_LOCALEDIR],[GLIB_DEFINE_LOCALEDIR($@)]) ])dnl ================================================ FILE: m4/python.m4 ================================================ ## this one is commonly used with AM_PATH_PYTHONDIR ... dnl AM_CHECK_PYMOD(MODNAME [,SYMBOL [,ACTION-IF-FOUND [,ACTION-IF-NOT-FOUND]]]) dnl Check if a module containing a given symbol is visible to python. 
AC_DEFUN([AM_CHECK_PYMOD], [AC_REQUIRE([AM_PATH_PYTHON]) py_mod_var=`echo $1['_']$2 | sed 'y%./+-%__p_%'` AC_MSG_CHECKING(for ifelse([$2],[],,[$2 in ])python module $1) AC_CACHE_VAL(py_cv_mod_$py_mod_var, [ ifelse([$2],[], [prog=" import sys try: import $1 except ImportError: sys.exit(1) except: sys.exit(0) sys.exit(0)"], [prog=" import $1 $1.$2"]) if $PYTHON -c "$prog" 1>&AC_FD_CC 2>&AC_FD_CC then eval "py_cv_mod_$py_mod_var=yes" else eval "py_cv_mod_$py_mod_var=no" fi ]) py_val=`eval "echo \`echo '$py_cv_mod_'$py_mod_var\`"` if test "x$py_val" != xno; then AC_MSG_RESULT(yes) ifelse([$3], [],, [$3 ])dnl else AC_MSG_RESULT(no) ifelse([$4], [],, [$4 ])dnl fi ]) dnl a macro to check for ability to create python extensions dnl AM_CHECK_PYTHON_HEADERS([ACTION-IF-POSSIBLE], [ACTION-IF-NOT-POSSIBLE]) dnl function also defines PYTHON_INCLUDES AC_DEFUN([AM_CHECK_PYTHON_HEADERS], [AC_REQUIRE([AM_PATH_PYTHON]) AC_MSG_CHECKING(for headers required to compile python extensions) dnl deduce PYTHON_INCLUDES py_prefix=`$PYTHON -c "import sys; print sys.prefix"` py_exec_prefix=`$PYTHON -c "import sys; print sys.exec_prefix"` if test -x "$PYTHON-config"; then PYTHON_INCLUDES=`$PYTHON-config --includes 2>/dev/null` else PYTHON_INCLUDES="-I${py_prefix}/include/python${PYTHON_VERSION}" if test "$py_prefix" != "$py_exec_prefix"; then PYTHON_INCLUDES="$PYTHON_INCLUDES -I${py_exec_prefix}/include/python${PYTHON_VERSION}" fi fi AC_SUBST(PYTHON_INCLUDES) dnl check if the headers exist: save_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS $PYTHON_INCLUDES" AC_TRY_CPP([#include ],dnl [AC_MSG_RESULT(found) $1],dnl [AC_MSG_RESULT(not found) $2]) CPPFLAGS="$save_CPPFLAGS" ]) ================================================ FILE: notification-server/.golangci.yml ================================================ run: timeout: 2m linters: enable: - govet - gocyclo - gosimple - ineffassign - staticcheck - unused - gofmt disable: - errcheck ================================================ FILE: 
notification-server/ccnet.conf ================================================ [Database] ENGINE = mysql HOST = 127.0.0.1 USER = seafile PASSWD = seafile DB = ccnet-db CREATE_TABLES=true ================================================ FILE: notification-server/client.go ================================================ package main import ( "encoding/json" "fmt" "runtime/debug" "time" jwt "github.com/golang-jwt/jwt/v5" "github.com/gorilla/websocket" log "github.com/sirupsen/logrus" ) const ( writeWait = 1 * time.Second pongWait = 5 * time.Second // Send pings to peer with this period. Must be less than pongWait. pingPeriod = 1 * time.Second checkTokenPeriod = 1 * time.Hour ) // Message is the message communicated between clients and server. type Message struct { Type string `json:"type"` Content json.RawMessage `json:"content"` } type SubList struct { Repos []Repo `json:"repos"` } type UnsubList struct { Repos []Repo `json:"repos"` } type Repo struct { RepoID string `json:"id"` Token string `json:"jwt_token"` } type myClaims struct { Exp int64 `json:"exp"` RepoID string `json:"repo_id"` UserName string `json:"username"` jwt.RegisteredClaims } func (*myClaims) Valid() error { return nil } func (client *Client) Close() { client.conn.Close() } func RecoverWrapper(f func()) { defer func() { if err := recover(); err != nil { log.Printf("panic: %v\n%s", err, debug.Stack()) } }() f() } // HandleMessages connects to the client to process message. func (client *Client) HandleMessages() { // Set keep alive. 
client.conn.SetPongHandler(func(string) error {
		client.Alive = time.Now()
		return nil
	})
	// Four goroutines per connection: reader, writer, token checker, pinger.
	client.ConnCloser.AddRunning(4)
	go RecoverWrapper(client.readMessages)
	go RecoverWrapper(client.writeMessages)
	go RecoverWrapper(client.checkTokenExpired)
	go RecoverWrapper(client.keepAlive)
	// Wait for all four goroutines to exit, then tear the client down.
	client.ConnCloser.Wait()
	client.Close()
	UnregisterClient(client)
	for id := range client.Repos {
		client.unsubscribe(id)
	}
}

// readMessages reads JSON messages from the websocket connection until the
// connection is being closed or a message fails to parse/handle; on failure
// it signals the other per-client goroutines to shut down.
func (client *Client) readMessages() {
	conn := client.conn
	defer func() {
		client.ConnCloser.Done()
	}()
	for {
		// Non-blocking check for shutdown before the next blocking read.
		select {
		case <-client.ConnCloser.HasBeenClosed():
			return
		default:
		}
		var msg Message
		err := conn.ReadJSON(&msg)
		if err != nil {
			client.ConnCloser.Signal()
			log.Debugf("failed to read json data from client: %s: %v", client.Addr, err)
			return
		}
		err = client.handleMessage(&msg)
		if err != nil {
			client.ConnCloser.Signal()
			log.Debugf("%v", err)
			return
		}
	}
}

// checkToken validates a repo-scoped JWT signed with the shared private key.
// It returns the user name, the expiry timestamp and true when the token is
// valid, unexpired and issued for repoID; otherwise ("", -1, false).
func checkToken(tokenString, repoID string) (string, int64, bool) {
	if len(tokenString) == 0 {
		return "", -1, false
	}
	claims := new(myClaims)
	token, err := jwt.ParseWithClaims(tokenString, claims, func(token *jwt.Token) (interface{}, error) {
		return []byte(privateKey), nil
	})
	if err != nil {
		return "", -1, false
	}
	if !token.Valid {
		return "", -1, false
	}
	now := time.Now()
	if claims.RepoID != repoID || claims.Exp <= now.Unix() {
		return "", -1, false
	}
	return claims.UserName, claims.Exp, true
}

// handleMessage dispatches a client message: "subscribe" registers the
// client for repo notifications after validating each repo token,
// "unsubscribe" removes subscriptions; any other type is an error.
func (client *Client) handleMessage(msg *Message) error {
	content := msg.Content
	if msg.Type == "subscribe" {
		var list SubList
		err := json.Unmarshal(content, &list)
		if err != nil {
			return err
		}
		for _, repo := range list.Repos {
			user, exp, valid := checkToken(repo.Token, repo.RepoID)
			if !valid {
				// Tell the client its token is invalid/expired instead of
				// silently skipping the repo.
				client.notifJWTExpired(repo.RepoID)
				continue
			}
			client.subscribe(repo.RepoID, user, exp)
		}
	} else if msg.Type == "unsubscribe" {
		var list UnsubList
		err := json.Unmarshal(content, &list)
		if err != nil {
			return err
		}
		for _, r := range list.Repos {
			client.unsubscribe(r.RepoID)
		}
	} else {
		err := fmt.Errorf("recv unexpected type of message: %s", msg.Type)
		return err
	}
	return nil
}

// subscribe subscribes to notifications of repos.
func (client *Client) subscribe(repoID, user string, exp int64) {
	client.User = user
	client.ReposMutex.Lock()
	client.Repos[repoID] = exp
	client.ReposMutex.Unlock()
	subMutex.Lock()
	subscribers, ok := subscriptions[repoID]
	if !ok {
		subscribers = newSubscribers(client)
		subscriptions[repoID] = subscribers
	}
	subMutex.Unlock()
	subscribers.Mutex.Lock()
	subscribers.Clients[client.ID] = client
	subscribers.Mutex.Unlock()
}

// unsubscribe removes the client's subscription to repoID, both from the
// client's own repo map and from the global subscriber table.
func (client *Client) unsubscribe(repoID string) {
	client.ReposMutex.Lock()
	delete(client.Repos, repoID)
	client.ReposMutex.Unlock()
	subMutex.Lock()
	subscribers, ok := subscriptions[repoID]
	if !ok {
		subMutex.Unlock()
		return
	}
	subMutex.Unlock()
	subscribers.Mutex.Lock()
	delete(subscribers.Clients, client.ID)
	subscribers.Mutex.Unlock()
}

// writeMessages serializes everything sent on WCh to the websocket; writes
// to the connection are guarded by connMutex, which is shared with the
// ping writer in keepAlive.
func (client *Client) writeMessages() {
	defer func() {
		client.ConnCloser.Done()
	}()
	for {
		select {
		case msg := <-client.WCh:
			client.conn.SetWriteDeadline(time.Now().Add(writeWait))
			client.connMutex.Lock()
			err := client.conn.WriteJSON(msg)
			client.connMutex.Unlock()
			if err != nil {
				client.ConnCloser.Signal()
				log.Debugf("failed to send notification to client: %v", err)
				return
			}
			m, _ := msg.(*Message)
			log.Debugf("send %s event to client %s(%d): %s", m.Type, client.User, client.ID, string(m.Content))
		case <-client.ConnCloser.HasBeenClosed():
			return
		}
	}
}

// keepAlive pings the client every pingPeriod and disconnects it when no
// pong has been received for more than pongWait.
func (client *Client) keepAlive() {
	defer func() {
		client.ConnCloser.Done()
	}()
	ticker := time.NewTicker(pingPeriod)
	for {
		select {
		case <-ticker.C:
			if time.Since(client.Alive) > pongWait {
				client.ConnCloser.Signal()
				log.Debugf("disconnected because no pong was received for more than %v", pongWait)
				return
			}
			client.conn.SetWriteDeadline(time.Now().Add(writeWait))
			client.connMutex.Lock()
			err := client.conn.WriteMessage(websocket.PingMessage, nil)
			client.connMutex.Unlock()
			if err != nil {
				client.ConnCloser.Signal()
				log.Debugf("failed to send ping message to client: %v", err)
				return
			}
		case <-client.ConnCloser.HasBeenClosed():
			return
		}
	}
}

// checkTokenExpired periodically drops subscriptions whose JWT has expired
// and notifies the client so it can re-subscribe with a fresh token.
func (client *Client) checkTokenExpired() {
	defer func() {
		client.ConnCloser.Done()
	}()
	ticker := time.NewTicker(checkTokenPeriod)
	for {
		select {
		case <-ticker.C:
			// unsubscribe will delete repo from client.Repos, we'd better unsubscribe repos later.
			pendingRepos := make(map[string]struct{})
			now := time.Now()
			client.ReposMutex.Lock()
			for repoID, exp := range client.Repos {
				if exp >= now.Unix() {
					continue
				}
				pendingRepos[repoID] = struct{}{}
			}
			client.ReposMutex.Unlock()
			for repoID := range pendingRepos {
				client.unsubscribe(repoID)
				client.notifJWTExpired(repoID)
			}
		case <-client.ConnCloser.HasBeenClosed():
			return
		}
	}
}

// notifJWTExpired queues a "jwt-expired" message for repoID on the client's
// write channel.
func (client *Client) notifJWTExpired(repoID string) {
	msg := new(Message)
	msg.Type = "jwt-expired"
	content := fmt.Sprintf("{\"repo_id\":\"%s\"}", repoID)
	msg.Content = []byte(content)
	client.WCh <- msg
}

================================================
FILE: notification-server/dup2.go
================================================
//go:build !(linux && arm64)

package main

import (
	"syscall"
)

// Dup duplicates file descriptor "from" onto "to". On platforms covered by
// this build tag, dup2 is used.
func Dup(from, to int) error {
	return syscall.Dup2(from, to)
}

================================================
FILE: notification-server/dup3.go
================================================
//go:build linux && arm64

package main

import (
	"syscall"
)

// Dup duplicates file descriptor "from" onto "to". The build tag selects
// dup3 here instead of dup2 for linux/arm64.
func Dup(from, to int) error {
	return syscall.Dup3(from, to, 0)
}

================================================
FILE: notification-server/event.go
================================================
package main

import (
	"context"
	"encoding/json"
	"reflect"
	"runtime/debug"
	"time"

	log "github.com/sirupsen/logrus"
)

// RepoUpdateEvent is the payload of a "repo-update" message.
type RepoUpdateEvent struct {
	RepoID   string `json:"repo_id"`
	CommitID string `json:"commit_id"`
}

// FileLockEvent is the payload of a "file-lock-changed" message.
type FileLockEvent struct {
	RepoID      string `json:"repo_id"`
	Path        string `json:"path"`
	ChangeEvent string `json:"change_event"`
	LockUser    string `json:"lock_user"`
}

// FolderPermEvent is the payload of a "folder-perm-changed" message.
type FolderPermEvent struct {
	RepoID string `json:"repo_id"`
	Path   string `json:"path"`
	Type 
string `json:"type"`
	ChangeEvent string `json:"change_event"`
	User        string `json:"user"`
	Group       int    `json:"group"`
	Perm        string `json:"perm"`
}

// CommentEvent is the payload of a "comment-update" message.
type CommentEvent struct {
	RepoID   string `json:"repo_id"`
	Type     string `json:"type"`
	FileUUID string `json:"file_uuid"`
	FilePath string `json:"file_path"`
}

// Notify fans a server-side event out to the clients subscribed to the
// event's repo. The message content is decoded according to msg.Type to
// extract the repo ID (and, for folder-perm changes, the affected user or
// group); unknown message types are ignored.
func Notify(msg *Message) {
	var repoID string
	// userList is the list of users who need to be notified, if it is nil, all subscribed users will be notified.
	var userList map[string]struct{}
	content := msg.Content
	switch msg.Type {
	case "repo-update":
		var event RepoUpdateEvent
		err := json.Unmarshal(content, &event)
		if err != nil {
			log.Warn(err)
			return
		}
		repoID = event.RepoID
	case "file-lock-changed":
		var event FileLockEvent
		err := json.Unmarshal(content, &event)
		if err != nil {
			log.Warn(err)
			return
		}
		repoID = event.RepoID
	case "folder-perm-changed":
		var event FolderPermEvent
		err := json.Unmarshal(content, &event)
		if err != nil {
			log.Warn(err)
			return
		}
		repoID = event.RepoID
		// A change scoped to a single user notifies only that user; one
		// scoped to a group notifies the group's members.
		if event.User != "" {
			userList = make(map[string]struct{})
			userList[event.User] = struct{}{}
		} else if event.Group != -1 {
			userList = getGroupMembers(event.Group)
		}
	case "comment-update":
		var event CommentEvent
		err := json.Unmarshal(content, &event)
		if err != nil {
			log.Warn(err)
			return
		}
		repoID = event.RepoID
	default:
		return
	}

	// Snapshot the subscriber set under the locks so the send loop below
	// runs without holding them.
	clients := make(map[uint64]*Client)
	subMutex.RLock()
	subscribers := subscriptions[repoID]
	if subscribers == nil {
		subMutex.RUnlock()
		return
	}
	subMutex.RUnlock()
	subscribers.Mutex.RLock()
	for clientID, client := range subscribers.Clients {
		clients[clientID] = client
	}
	subscribers.Mutex.RUnlock()
	go func() {
		defer func() {
			if err := recover(); err != nil {
				log.Printf("panic: %v\n%s", err, debug.Stack())
			}
		}()
		// In order to avoid being blocked on a Client for a long time, it is necessary to write WCh in a non-blocking way,
		// and the waiting WCh needs to be blocked and processed after other Clients have finished writing.
		value := reflect.ValueOf(msg)
		var branches []reflect.SelectCase
		for _, client := range clients {
			if !needToNotif(userList, client.User) {
				continue
			}
			branch := reflect.SelectCase{Dir: reflect.SelectSend, Chan: reflect.ValueOf(client.WCh), Send: value}
			branches = append(branches, branch)
		}
		// reflect.Select completes one send per iteration; the completed
		// case is removed until every selected client has been written to.
		for len(branches) != 0 {
			index, _, _ := reflect.Select(branches)
			branches = append(branches[:index], branches[index+1:]...)
		}
	}()
}

// getGroupMembers returns the set of user names in the given ccnet group,
// or nil on any database error.
func getGroupMembers(group int) map[string]struct{} {
	query := `SELECT user_name FROM GroupUser WHERE group_id = ?`
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	stmt, err := ccnetDB.PrepareContext(ctx, query)
	if err != nil {
		log.Printf("failed to prepare sql: %s:%v", query, err)
		return nil
	}
	defer stmt.Close()
	rows, err := stmt.QueryContext(ctx, group)
	if err != nil {
		log.Printf("failed to query sql: %v", err)
		return nil
	}
	defer rows.Close()
	userList := make(map[string]struct{})
	var userName string
	for rows.Next() {
		if err := rows.Scan(&userName); err == nil {
			userList[userName] = struct{}{}
		}
	}
	if err := rows.Err(); err != nil {
		log.Printf("failed to scan sql rows: %v", err)
		return nil
	}
	return userList
}

// needToNotif reports whether user should receive a notification; a nil
// userList means "notify everyone".
func needToNotif(userList map[string]struct{}, user string) bool {
	if userList == nil {
		return true
	}
	_, ok := userList[user]
	return ok
}

================================================
FILE: notification-server/go.mod
================================================
module github.com/haiwen/seafile-server/notification-server

go 1.17

require (
	github.com/dgraph-io/ristretto v0.2.0
	github.com/go-sql-driver/mysql v1.5.0
	github.com/golang-jwt/jwt/v5 v5.2.2
	github.com/gorilla/mux v1.8.0
	github.com/gorilla/websocket v1.4.2
	github.com/sirupsen/logrus v1.9.3
)

require (
	github.com/cespare/xxhash/v2 v2.1.1 // indirect
	github.com/dustin/go-humanize v1.0.1 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	golang.org/x/sys v0.11.0 // indirect
)

================================================
FILE: 
notification-server/go.sum ================================================ github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgraph-io/ristretto v0.2.0 h1:XAfl+7cmoUDWW/2Lx8TGZQjjxIQ2Ley9DSf52dru4WE= github.com/dgraph-io/ristretto v0.2.0/go.mod h1:8uBHCU/PBV4Ag0CJrP47b9Ofby5dqWNh4FicAdoqFNU= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= ================================================ FILE: notification-server/logger.go ================================================ package main import ( "fmt" "strings" log "github.com/sirupsen/logrus" ) const ( timestampFormat = "[2006-01-02 15:04:05] " ) type LogFormatter struct{} func (f *LogFormatter) Format(entry *log.Entry) ([]byte, error) { 
levelStr := entry.Level.String()
	// Present "fatal" as ERROR; all other levels are upper-cased as-is.
	if levelStr == "fatal" {
		levelStr = "ERROR"
	} else {
		levelStr = strings.ToUpper(levelStr)
	}
	level := fmt.Sprintf("[%s] ", levelStr)
	appName := ""
	if logToStdout {
		appName = "[notification-server] "
	}
	// Pre-size the buffer: prefix + timestamp + level + message + newline.
	buf := make([]byte, 0, len(appName)+len(timestampFormat)+len(level)+len(entry.Message)+1)
	if logToStdout {
		buf = append(buf, appName...)
	}
	buf = entry.Time.AppendFormat(buf, timestampFormat)
	buf = append(buf, level...)
	buf = append(buf, entry.Message...)
	buf = append(buf, '\n')
	return buf, nil
}

================================================
FILE: notification-server/server.go
================================================
package main

import (
	"database/sql"
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"net/http"
	"os"
	"os/signal"
	"path/filepath"
	"strconv"
	"strings"
	"syscall"
	"time"

	_ "github.com/go-sql-driver/mysql"
	jwt "github.com/golang-jwt/jwt/v5"
	"github.com/gorilla/mux"
	"github.com/gorilla/websocket"
	log "github.com/sirupsen/logrus"
)

// Global server state: CLI flags, logging targets, JWT signing key, listen
// address and the ccnet database handle.
var configDir string
var logFile, absLogFile string
var privateKey string
var host string
var port uint32
var logFp *os.File
var ccnetDB *sql.DB
var logToStdout bool

// init registers command-line flags and installs the log formatter before
// main runs.
func init() {
	flag.StringVar(&configDir, "c", "", "config directory")
	flag.StringVar(&logFile, "l", "", "log file path")
	env := os.Getenv("SEAFILE_LOG_TO_STDOUT")
	if env == "true" {
		logToStdout = true
	}
	log.SetFormatter(&LogFormatter{})
}

// loadNotifConfig reads listen host/port and log level from the
// environment, falling back to 0.0.0.0:8083 and "info".
func loadNotifConfig() {
	host = os.Getenv("NOTIFICATION_SERVER_HOST")
	if host == "" {
		host = "0.0.0.0"
	}
	port = 8083
	if os.Getenv("NOTIFICATION_SERVER_PORT") != "" {
		i, err := strconv.Atoi(os.Getenv("NOTIFICATION_SERVER_PORT"))
		if err == nil {
			port = uint32(i)
		}
	}
	logLevel := os.Getenv("NOTIFICATION_SERVER_LOG_LEVEL")
	if logLevel == "" {
		logLevel = "info"
	}
	level, err := log.ParseLevel(logLevel)
	if err != nil {
		log.Info("use the default log level: info")
		log.SetLevel(log.InfoLevel)
	} else {
		log.SetLevel(level)
	}
}

// loadCcnetDB opens the ccnet MySQL database using environment-derived
// options and terminates the process on any failure.
func loadCcnetDB() {
	option, err := loadDBOptionFromEnv()
	if err != nil {
		log.Fatalf("Failed to load database from env: %v", err)
	}
	var dsn string
	if option.UnixSocket == "" {
		dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=%t&readTimeout=60s&writeTimeout=60s", option.User, option.Password, option.Host, option.Port, option.CcnetDbName, option.UseTLS)
	} else {
		dsn = fmt.Sprintf("%s:%s@unix(%s)/%s?readTimeout=60s&writeTimeout=60s", option.User, option.Password, option.UnixSocket, option.CcnetDbName)
	}
	ccnetDB, err = sql.Open("mysql", dsn)
	if err != nil {
		log.Fatalf("Failed to open database: %v", err)
	}
	if err := ccnetDB.Ping(); err != nil {
		log.Fatalf("Failed to connected to mysql: %v", err)
	}
	ccnetDB.SetConnMaxLifetime(5 * time.Minute)
	ccnetDB.SetMaxOpenConns(8)
	ccnetDB.SetMaxIdleConns(8)
}

// DBOption holds MySQL connection settings.
// NOTE(review): UnixSocket and UseTLS are never populated by
// loadDBOptionFromEnv in this file, so TCP without TLS is always used —
// confirm whether that is intentional.
type DBOption struct {
	User          string
	Password      string
	Host          string
	Port          int
	CcnetDbName   string
	SeafileDbName string
	UnixSocket    string
	UseTLS        bool
}

// loadDBOptionFromEnv builds a DBOption from SEAFILE_MYSQL_DB_* variables.
// User, password and host are required; the port defaults to 3306 and the
// database names fall back to "ccnet_db"/"seafile_db".
func loadDBOptionFromEnv() (*DBOption, error) {
	user := os.Getenv("SEAFILE_MYSQL_DB_USER")
	if user == "" {
		return nil, fmt.Errorf("failed to read SEAFILE_MYSQL_DB_USER")
	}
	password := os.Getenv("SEAFILE_MYSQL_DB_PASSWORD")
	if password == "" {
		return nil, fmt.Errorf("failed to read SEAFILE_MYSQL_DB_PASSWORD")
	}
	host := os.Getenv("SEAFILE_MYSQL_DB_HOST")
	if host == "" {
		return nil, fmt.Errorf("failed to read SEAFILE_MYSQL_DB_HOST")
	}
	port := 3306
	portStr := os.Getenv("SEAFILE_MYSQL_DB_PORT")
	if portStr != "" {
		p, _ := strconv.ParseUint(portStr, 10, 32)
		if p > 0 {
			port = int(p)
		}
	}
	ccnetDbName := os.Getenv("SEAFILE_MYSQL_DB_CCNET_DB_NAME")
	if ccnetDbName == "" {
		ccnetDbName = "ccnet_db"
		log.Infof("Failed to read SEAFILE_MYSQL_DB_CCNET_DB_NAME, use ccnet_db by default")
	}
	seafileDbName := os.Getenv("SEAFILE_MYSQL_DB_SEAFILE_DB_NAME")
	if seafileDbName == "" {
		seafileDbName = "seafile_db"
		log.Infof("Failed to read SEAFILE_MYSQL_DB_SEAFILE_DB_NAME, use seafile_db by default")
	}
	log.Infof("Database: user = %s", user)
	log.Infof("Database: host = %s", host)
	log.Infof("Database: port = %d", port)
	log.Infof("Database: ccnet_db_name = %s", ccnetDbName)
	log.Infof("Database: seafile_db_name = %s", seafileDbName)
	option := new(DBOption)
	option.User = user
	option.Password = password
	option.Host = host
	option.Port = port
	option.CcnetDbName = ccnetDbName
	option.SeafileDbName = seafileDbName
	return option, nil
}

// main wires everything together: logging, JWT key, config, database,
// SIGUSR1 log rotation, and finally the HTTP/websocket listener.
func main() {
	flag.Parse()

	if configDir == "" {
		log.Fatal("config directory must be specified.")
	}

	_, err := os.Stat(configDir)
	if os.IsNotExist(err) {
		log.Fatalf("config directory %s doesn't exist: %v.", configDir, err)
	}

	if logToStdout {
		// Use default output (StdOut)
	} else if logFile == "" {
		absLogFile = filepath.Join(configDir, "notification-server.log")
		fp, err := os.OpenFile(absLogFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
		if err != nil {
			log.Fatalf("Failed to open or create log file: %v", err)
		}
		logFp = fp
		log.SetOutput(fp)
	} else if logFile != "-" {
		absLogFile, err = filepath.Abs(logFile)
		if err != nil {
			log.Fatalf("Failed to convert log file path to absolute path: %v", err)
		}
		fp, err := os.OpenFile(absLogFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
		if err != nil {
			log.Fatalf("Failed to open or create log file: %v", err)
		}
		logFp = fp
		log.SetOutput(fp)
	}

	// Redirect stderr into the log file as well so panics are captured.
	if absLogFile != "" && !logToStdout {
		Dup(int(logFp.Fd()), int(os.Stderr.Fd()))
	}

	if err := loadJwtPrivateKey(); err != nil {
		log.Fatalf("Failed to read config: %v", err)
	}

	loadNotifConfig()
	loadCcnetDB()

	Init()

	// SIGUSR1 triggers log rotation.
	go handleUser1Signal()

	router := newHTTPRouter()

	log.Info("notification server started.")

	server := new(http.Server)
	server.Addr = fmt.Sprintf("%s:%d", host, port)
	server.Handler = router
	err = server.ListenAndServe()
	if err != nil {
		log.Infof("notificationserver exiting: %v", err)
	}
}

// loadJwtPrivateKey reads the shared JWT signing key from the environment.
func loadJwtPrivateKey() error {
	privateKey = os.Getenv("JWT_PRIVATE_KEY")
	if privateKey == "" {
		return fmt.Errorf("failed to read JWT_PRIVATE_KEY")
	}
	return nil
}

// handleUser1Signal rotates the log file each time SIGUSR1 is received.
func handleUser1Signal() {
	signalChan := make(chan os.Signal, 1)
	signal.Notify(signalChan, syscall.SIGUSR1)
	for {
		<-signalChan
		logRotate()
	}
}

// logRotate reopens the log file (after external rotation) and swaps it in.
func logRotate() {
	if logToStdout { 
return
	}
	fp, err := os.OpenFile(absLogFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
	if err != nil {
		log.Fatalf("Failed to reopen notification log: %v", err)
	}
	log.SetOutput(fp)
	// NOTE(review): if logFp were nil here it would stay nil and the Dup
	// call below would dereference it — presumably unreachable given how
	// logging is set up in main; confirm.
	if logFp != nil {
		logFp.Close()
		logFp = fp
	}
	// Re-point stderr at the freshly opened log file as well.
	Dup(int(logFp.Fd()), int(os.Stderr.Fd()))
}

// newHTTPRouter builds the HTTP routes: the websocket endpoint at "/",
// event injection at "/events", and a liveness probe at "/ping".
func newHTTPRouter() *mux.Router {
	r := mux.NewRouter()
	r.Handle("/", appHandler(messageCB))
	r.Handle("/events{slash:\\/?}", appHandler(eventCB))
	r.Handle("/ping{slash:\\/?}", appHandler(pingCB))
	return r
}

// Any http request will be automatically upgraded to websocket.
func messageCB(rsp http.ResponseWriter, r *http.Request) *appError {
	upgrader := newUpgrader()
	conn, err := upgrader.Upgrade(rsp, r, nil)
	if err != nil {
		log.Warnf("failed to upgrade http to websocket: %v", err)
		// Don't return an error here, because when the upgrade fails, Upgrade replies to the client with an HTTP error response.
		return nil
	}
	// Prefer the proxy-provided client address when present.
	addr := r.Header.Get("x-forwarded-for")
	if addr == "" {
		addr = conn.RemoteAddr().String()
	}
	client := NewClient(conn, addr)
	RegisterClient(client)
	// Blocks until the client disconnects.
	client.HandleMessages()
	return nil
}

// eventCB authenticates an internal event POST via JWT and forwards the
// decoded message to Notify.
func eventCB(rsp http.ResponseWriter, r *http.Request) *appError {
	msg := Message{}
	token := getAuthorizationToken(r.Header)
	if !checkAuthToken(token) {
		return &appError{Error: nil,
			Message: "Notification token not match",
			Code:    http.StatusBadRequest,
		}
	}
	body, err := io.ReadAll(r.Body)
	if err != nil {
		return &appError{Error: err,
			Message: "",
			Code:    http.StatusInternalServerError,
		}
	}
	if err := json.Unmarshal(body, &msg); err != nil {
		return &appError{Error: err,
			Message: "",
			Code:    http.StatusInternalServerError,
		}
	}
	Notify(&msg)
	return nil
}

// getAuthorizationToken extracts the token from an
// "Authorization: <scheme> <token>" header, or returns "".
func getAuthorizationToken(h http.Header) string {
	auth := h.Get("Authorization")
	splitResult := strings.Split(auth, " ")
	if len(splitResult) > 1 {
		return splitResult[1]
	}
	return ""
}

// checkAuthToken verifies a JWT signed with the shared private key and
// rejects expired tokens.
func checkAuthToken(tokenString string) bool {
	if len(tokenString) == 0 {
		return false
	}
	claims := new(myClaims)
	token, err := jwt.ParseWithClaims(tokenString, claims, func(token *jwt.Token) (interface{}, error) {
		return []byte(privateKey), nil
	})
	if err != nil {
		return false
	}
	if !token.Valid {
		return false
	}
	now := time.Now()
	return claims.Exp > now.Unix()
}

// newUpgrader returns a websocket upgrader that accepts any origin.
func newUpgrader() *websocket.Upgrader {
	upgrader := &websocket.Upgrader{
		ReadBufferSize:  4096,
		WriteBufferSize: 1024,
		CheckOrigin: func(r *http.Request) bool {
			return true
		},
	}
	return upgrader
}

// pingCB answers liveness probes.
func pingCB(rsp http.ResponseWriter, r *http.Request) *appError {
	fmt.Fprintln(rsp, "{\"ret\": \"pong\"}")
	return nil
}

// appError carries an HTTP status code, a client-facing message, and the
// underlying error for logging.
type appError struct {
	Error   error
	Message string
	Code    int
}

type appHandler func(http.ResponseWriter, *http.Request) *appError

// ServeHTTP logs internal errors and converts a non-nil appError into an
// HTTP error response.
func (fn appHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	e := fn(w, r)
	if e != nil {
		if e.Error != nil && e.Code == http.StatusInternalServerError {
			log.Infof("path %s internal server error: %v\n", r.URL.Path, e.Error)
		}
		http.Error(w, e.Message, e.Code)
	}
}

================================================
FILE: notification-server/subscriptions.go
================================================
package main

import (
	"sync"
	"sync/atomic"
	"time"

	"github.com/dgraph-io/ristretto/z"
	"github.com/gorilla/websocket"
)

const (
	chanBufSize = 10
)

// clients is a map from client id to Client structs.
// It contains all current connected clients. Each client is identified by 64-bit ID.
var clients map[uint64]*Client
var clientsMutex sync.RWMutex

// Use atomic operation to increase this value.
var nextClientID uint64 = 1

// subscriptions is a map from repo_id to Subscribers struct.
// It's protected by rw mutex.
var subscriptions map[string]*Subscribers
var subMutex sync.RWMutex

// Client contains information about a client.
// Two go routines are associated with each client to handle message reading and writing.
// Messages sent to the client have to be written into WCh, since only one go routine can write to a websocket connection.
type Client struct {
	// The ID of this client
	ID uint64
	// Websocket connection.
conn *websocket.Conn
	// Connections do not support concurrent writers. Protect write with a mutex.
	connMutex sync.Mutex
	// WCh is used to write messages to a client.
	// The structs written into the channel will be converted to JSON and sent to client.
	WCh chan interface{}
	// Repos is the repos this client subscribed to.
	Repos      map[string]int64
	ReposMutex sync.Mutex
	// Alive is the last time received pong.
	Alive time.Time
	// ConnCloser coordinates shutdown of the per-client goroutines.
	ConnCloser *z.Closer
	// Addr is the address of client.
	Addr string
	// User is the user of client.
	User string
}

// Subscribers contains the clients who subscribe to a repo's notifications.
type Subscribers struct {
	// Clients is a map from client id to Client struct, protected by rw mutex.
	Clients map[uint64]*Client
	Mutex   sync.RWMutex
}

// Init inits clients and subscriptions.
func Init() {
	clients = make(map[uint64]*Client)
	subscriptions = make(map[string]*Subscribers)
}

// NewClient creates a new client with a fresh ID, an empty subscription
// map, and a buffered write channel.
func NewClient(conn *websocket.Conn, addr string) *Client {
	client := new(Client)
	client.ID = atomic.AddUint64(&nextClientID, 1)
	client.conn = conn
	client.WCh = make(chan interface{}, chanBufSize)
	client.Repos = make(map[string]int64)
	client.Alive = time.Now()
	client.Addr = addr
	client.ConnCloser = z.NewCloser(0)
	return client
}

// RegisterClient adds the client to the list of clients.
func RegisterClient(client *Client) {
	clientsMutex.Lock()
	clients[client.ID] = client
	clientsMutex.Unlock()
}

// UnregisterClient deletes the client from the list of clients.
func UnregisterClient(client *Client) { clientsMutex.Lock() delete(clients, client.ID) clientsMutex.Unlock() } func newSubscribers(client *Client) *Subscribers { subscribers := new(Subscribers) subscribers.Clients = make(map[uint64]*Client) subscribers.Clients[client.ID] = client return subscribers } ================================================ FILE: pytest.ini ================================================ [pytest] addopts = -vv -s log_format = %(asctime)s:%(name)s:%(levelname)s:%(message)s log_date_format = %Y-%m-%d %H:%M:%S # log_cli_level = info ================================================ FILE: python/LICENSE.txt ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. 
Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: python/Makefile.am ================================================ SUBDIRS = seafile seaserv ================================================ FILE: python/seafile/Makefile.am ================================================ seafiledir=${pyexecdir}/seafile seafile_PYTHON = __init__.py rpcclient.py ================================================ FILE: python/seafile/__init__.py ================================================ from .rpcclient import SeafServerThreadedRpcClient as ServerThreadedRpcClient class TaskType(object): DOWNLOAD = 0 UPLOAD = 1 ================================================ FILE: python/seafile/rpcclient.py ================================================ from pysearpc import searpc_func, SearpcError, NamedPipeClient class SeafServerThreadedRpcClient(NamedPipeClient): def __init__(self, pipe_path): NamedPipeClient.__init__(self, pipe_path, "seafserv-threaded-rpcserver") # repo manipulation @searpc_func("string", ["string", "string", "string", "string", "int", "string", "string"]) def seafile_create_repo(name, desc, owner_email, passwd, enc_version, pwd_hash_algo, pwd_hash_params): pass create_repo = seafile_create_repo @searpc_func("string", ["string", "string", "string", "string", "string", "string", "string", "int", "string", "string", "string"]) def seafile_create_enc_repo(repo_id, name, desc, owner_email, magic, random_key, salt, 
enc_version, pwd_hash, pwd_hash_algo, pwd_hash_params): pass create_enc_repo = seafile_create_enc_repo @searpc_func("objlist", ["string", "int", "int"]) def seafile_get_repos_by_id_prefix(id_prefix, start, limit): pass get_repos_by_id_prefix = seafile_get_repos_by_id_prefix @searpc_func("object", ["string"]) def seafile_get_repo(repo_id): pass get_repo = seafile_get_repo @searpc_func("int", ["string"]) def seafile_destroy_repo(repo_id): pass remove_repo = seafile_destroy_repo @searpc_func("objlist", ["int", "int", "string", "int"]) def seafile_get_repo_list(start, limit, order_by, ret_virt_repo): pass get_repo_list = seafile_get_repo_list @searpc_func("int64", []) def seafile_count_repos(): pass count_repos = seafile_count_repos @searpc_func("int", ["string", "string", "string", "string"]) def seafile_edit_repo(repo_id, name, description, user): pass edit_repo = seafile_edit_repo @searpc_func("int", ["string", "string"]) def seafile_is_repo_owner(user_id, repo_id): pass is_repo_owner = seafile_is_repo_owner @searpc_func("int", ["string", "string"]) def seafile_set_repo_owner(email, repo_id): pass set_repo_owner = seafile_set_repo_owner @searpc_func("string", ["string"]) def seafile_get_repo_owner(repo_id): pass get_repo_owner = seafile_get_repo_owner @searpc_func("objlist", []) def seafile_get_orphan_repo_list(): pass get_orphan_repo_list = seafile_get_orphan_repo_list @searpc_func("objlist", ["string", "int", "int", "int"]) def seafile_list_owned_repos(user_id, ret_corrupted, start, limit): pass list_owned_repos = seafile_list_owned_repos @searpc_func("objlist", ["string"]) def seafile_search_repos_by_name(name): pass search_repos_by_name = seafile_search_repos_by_name @searpc_func("int64", ["string"]) def seafile_server_repo_size(repo_id): pass server_repo_size = seafile_server_repo_size @searpc_func("int", ["string", "string"]) def seafile_repo_set_access_property(repo_id, role): pass repo_set_access_property = seafile_repo_set_access_property 
@searpc_func("string", ["string"]) def seafile_repo_query_access_property(repo_id): pass repo_query_access_property = seafile_repo_query_access_property @searpc_func("int", ["string", "string", "string"]) def seafile_revert_on_server(repo_id, commit_id, user_name): pass revert_on_server = seafile_revert_on_server @searpc_func("objlist", ["string", "string", "string"]) def seafile_diff(): pass get_diff = seafile_diff @searpc_func("int", ["string", "string", "string", "string", "string"]) def seafile_post_file(repo_id, tmp_file_path, parent_dir, filename, user): pass post_file = seafile_post_file @searpc_func("int", ["string", "string", "string", "string"]) def seafile_post_dir(repo_id, parent_dir, new_dir_name, user): pass post_dir = seafile_post_dir @searpc_func("int", ["string", "string", "string", "string"]) def seafile_post_empty_file(repo_id, parent_dir, filename, user): pass post_empty_file = seafile_post_empty_file @searpc_func("int", ["string", "string", "string", "string", "string", "string"]) def seafile_put_file(repo_id, tmp_file_path, parent_dir, filename, user, head_id): pass put_file = seafile_put_file @searpc_func("int", ["string", "string", "string", "string"]) def seafile_del_file(repo_id, parent_dir, filename, user): pass del_file = seafile_del_file @searpc_func("int", ["string", "string", "string"]) def seafile_batch_del_files(repo_id, filepaths, user): pass batch_del_files = seafile_batch_del_files @searpc_func("object", ["string", "string", "string", "string", "string", "string", "string", "int", "int"]) def seafile_copy_file(src_repo, src_dir, src_filename, dst_repo, dst_dir, dst_filename, user, need_progress, synchronous): pass copy_file = seafile_copy_file @searpc_func("object", ["string", "string", "string", "string", "string", "string", "int", "string", "int", "int"]) def seafile_move_file(src_repo, src_dir, src_filename, dst_repo, dst_dir, dst_filename, replace, user, need_progress, synchronous): pass move_file = seafile_move_file 
@searpc_func("int", ["string", "string", "string", "string", "string"]) def seafile_rename_file(repo_id, parent_dir, oldname, newname, user): pass rename_file = seafile_rename_file @searpc_func("int", ["string", "string"]) def seafile_is_valid_filename(repo_id, filename): pass is_valid_filename = seafile_is_valid_filename @searpc_func("object", ["string", "int", "string"]) def seafile_get_commit(repo_id, version, commit_id): pass get_commit = seafile_get_commit @searpc_func("string", ["string", "string", "int", "int"]) def seafile_list_file_blocks(repo_id, file_id, offset, limit): pass list_file_blocks = seafile_list_file_blocks @searpc_func("objlist", ["string", "string", "int", "int"]) def seafile_list_dir(repo_id, dir_id, offset, limit): pass list_dir = seafile_list_dir @searpc_func("objlist", ["string", "string", "sting", "string", "int", "int"]) def list_dir_with_perm(repo_id, dir_path, dir_id, user, offset, limit): pass @searpc_func("int64", ["string", "int", "string"]) def seafile_get_file_size(store_id, version, file_id): pass get_file_size = seafile_get_file_size @searpc_func("int64", ["string", "int", "string"]) def seafile_get_dir_size(store_id, version, dir_id): pass get_dir_size = seafile_get_dir_size @searpc_func("objlist", ["string", "string", "string"]) def seafile_list_dir_by_path(repo_id, commit_id, path): pass list_dir_by_path = seafile_list_dir_by_path @searpc_func("string", ["string", "string", "string"]) def seafile_get_dir_id_by_commit_and_path(repo_id, commit_id, path): pass get_dir_id_by_commit_and_path = seafile_get_dir_id_by_commit_and_path @searpc_func("string", ["string", "string"]) def seafile_get_file_id_by_path(repo_id, path): pass get_file_id_by_path = seafile_get_file_id_by_path @searpc_func("string", ["string", "string"]) def seafile_get_dir_id_by_path(repo_id, path): pass get_dir_id_by_path = seafile_get_dir_id_by_path @searpc_func("string", ["string", "string", "string"]) def seafile_get_file_id_by_commit_and_path(repo_id, 
commit_id, path): pass get_file_id_by_commit_and_path = seafile_get_file_id_by_commit_and_path @searpc_func("object", ["string", "string"]) def seafile_get_dirent_by_path(repo_id, commit_id, path): pass get_dirent_by_path = seafile_get_dirent_by_path @searpc_func("objlist", ["string", "string", "string", "int"]) def seafile_list_file_revisions(repo_id, commit_id, path, limit): pass list_file_revisions = seafile_list_file_revisions @searpc_func("objlist", ["string", "string"]) def seafile_calc_files_last_modified(repo_id, parent_dir, limit): pass calc_files_last_modified = seafile_calc_files_last_modified @searpc_func("int", ["string", "string", "string", "string"]) def seafile_revert_file(repo_id, commit_id, path, user): pass revert_file = seafile_revert_file @searpc_func("string", ["string", "string"]) def seafile_check_repo_blocks_missing(repo_id, blklist): pass check_repo_blocks_missing = seafile_check_repo_blocks_missing @searpc_func("int", ["string", "string", "string", "string"]) def seafile_revert_dir(repo_id, commit_id, path, user): pass revert_dir = seafile_revert_dir @searpc_func("objlist", ["string", "int", "string", "string", "int"]) def get_deleted(repo_id, show_days, path, scan_stat, limit): pass # share repo to user @searpc_func("string", ["string", "string", "string", "string"]) def seafile_add_share(repo_id, from_email, to_email, permission): pass add_share = seafile_add_share @searpc_func("objlist", ["string", "string", "int", "int"]) def seafile_list_share_repos(email, query_col, start, limit): pass list_share_repos = seafile_list_share_repos @searpc_func("objlist", ["string", "string"]) def seafile_list_repo_shared_to(from_user, repo_id): pass list_repo_shared_to = seafile_list_repo_shared_to @searpc_func("string", ["string", "string", "string", "string", "string", "string"]) def share_subdir_to_user(repo_id, path, owner, share_user, permission, passwd): pass @searpc_func("int", ["string", "string", "string", "string"]) def 
unshare_subdir_for_user(repo_id, path, owner, share_user): pass @searpc_func("int", ["string", "string", "string", "string", "string"]) def update_share_subdir_perm_for_user(repo_id, path, owner, share_user, permission): pass @searpc_func("object", ["string", "string", "string", "int"]) def get_shared_repo_by_path(repo_id, path, shared_to, is_org): pass @searpc_func("objlist", ["int", "string", "string", "int", "int"]) def seafile_list_org_share_repos(org_id, email, query_col, start, limit): pass list_org_share_repos = seafile_list_org_share_repos @searpc_func("int", ["string", "string", "string"]) def seafile_remove_share(repo_id, from_email, to_email): pass remove_share = seafile_remove_share @searpc_func("int", ["string", "string", "string", "string"]) def set_share_permission(repo_id, from_email, to_email, permission): pass # share repo to group @searpc_func("int", ["string", "int", "string", "string"]) def seafile_group_share_repo(repo_id, group_id, user_name, permisson): pass group_share_repo = seafile_group_share_repo @searpc_func("int", ["string", "int", "string"]) def seafile_group_unshare_repo(repo_id, group_id, user_name): pass group_unshare_repo = seafile_group_unshare_repo @searpc_func("string", ["string"]) def seafile_get_shared_groups_by_repo(repo_id): pass get_shared_groups_by_repo=seafile_get_shared_groups_by_repo @searpc_func("objlist", ["string", "string"]) def seafile_list_repo_shared_group(from_user, repo_id): pass list_repo_shared_group = seafile_list_repo_shared_group @searpc_func("object", ["string", "string", "int", "int"]) def get_group_shared_repo_by_path(repo_id, path, group_id, is_org): pass @searpc_func("objlist", ["string"]) def get_group_repos_by_user (user): pass @searpc_func("objlist", ["string", "int"]) def get_org_group_repos_by_user (user, org_id): pass @searpc_func("objlist", ["string", "string", "string"]) def seafile_get_shared_users_for_subdir(repo_id, path, from_user): pass get_shared_users_for_subdir = 
seafile_get_shared_users_for_subdir @searpc_func("objlist", ["string", "string", "string"]) def seafile_get_shared_groups_for_subdir(repo_id, path, from_user): pass get_shared_groups_for_subdir = seafile_get_shared_groups_for_subdir @searpc_func("string", ["string", "string", "string", "int", "string", "string"]) def share_subdir_to_group(repo_id, path, owner, share_group, permission, passwd): pass @searpc_func("int", ["string", "string", "string", "int"]) def unshare_subdir_for_group(repo_id, path, owner, share_group): pass @searpc_func("int", ["string", "string", "string", "int", "string"]) def update_share_subdir_perm_for_group(repo_id, path, owner, share_group, permission): pass @searpc_func("string", ["int"]) def seafile_get_group_repoids(group_id): pass get_group_repoids = seafile_get_group_repoids @searpc_func("objlist", ["int"]) def seafile_get_repos_by_group(group_id): pass get_repos_by_group = seafile_get_repos_by_group @searpc_func("objlist", ["string"]) def get_group_repos_by_owner(user_name): pass @searpc_func("string", ["string"]) def get_group_repo_owner(repo_id): pass @searpc_func("int", ["int", "string"]) def seafile_remove_repo_group(group_id, user_name): pass remove_repo_group = seafile_remove_repo_group @searpc_func("int", ["int", "string", "string"]) def set_group_repo_permission(group_id, repo_id, permission): pass # branch and commit @searpc_func("objlist", ["string"]) def seafile_branch_gets(repo_id): pass branch_gets = seafile_branch_gets @searpc_func("objlist", ["string", "int", "int"]) def seafile_get_commit_list(repo_id, offset, limit): pass get_commit_list = seafile_get_commit_list ###### Token #################### @searpc_func("int", ["string", "string", "string"]) def seafile_set_repo_token(repo_id, email, token): pass set_repo_token = seafile_set_repo_token @searpc_func("string", ["string", "string"]) def seafile_get_repo_token_nonnull(repo_id, email): """Get the token of the repo for the email user. 
If the token does not exist, a new one is generated and returned. """ pass get_repo_token_nonnull = seafile_get_repo_token_nonnull @searpc_func("string", ["string", "string"]) def seafile_generate_repo_token(repo_id, email): pass generate_repo_token = seafile_generate_repo_token @searpc_func("int", ["string", "string"]) def seafile_delete_repo_token(repo_id, token, user): pass delete_repo_token = seafile_delete_repo_token @searpc_func("objlist", ["string"]) def seafile_list_repo_tokens(repo_id): pass list_repo_tokens = seafile_list_repo_tokens @searpc_func("objlist", ["string"]) def seafile_list_repo_tokens_by_email(email): pass list_repo_tokens_by_email = seafile_list_repo_tokens_by_email @searpc_func("int", ["string", "string"]) def seafile_delete_repo_tokens_by_peer_id(email, user_id): pass delete_repo_tokens_by_peer_id = seafile_delete_repo_tokens_by_peer_id @searpc_func("int", ["string"]) def delete_repo_tokens_by_email(email): pass ###### quota ########## @searpc_func("int64", ["string"]) def seafile_get_user_quota_usage(user_id): pass get_user_quota_usage = seafile_get_user_quota_usage @searpc_func("int64", ["string"]) def seafile_get_user_share_usage(user_id): pass get_user_share_usage = seafile_get_user_share_usage @searpc_func("int64", ["int"]) def seafile_get_org_quota_usage(org_id): pass get_org_quota_usage = seafile_get_org_quota_usage @searpc_func("int64", ["int", "string"]) def seafile_get_org_user_quota_usage(org_id, user): pass get_org_user_quota_usage = seafile_get_org_user_quota_usage @searpc_func("int", ["string", "int64"]) def set_user_quota(user, quota): pass @searpc_func("int64", ["string"]) def get_user_quota(user): pass @searpc_func("int", ["int", "int64"]) def set_org_quota(org_id, quota): pass @searpc_func("int64", ["int"]) def get_org_quota(org_id): pass @searpc_func("int", ["int", "string", "int64"]) def set_org_user_quota(org_id, user, quota): pass @searpc_func("int64", ["int", "string"]) def get_org_user_quota(org_id, user): pass 
@searpc_func("int", ["string", "int64"]) def check_quota(repo_id, delta): pass @searpc_func("objlist", []) def list_user_quota_usage(): pass # password management @searpc_func("int", ["string", "string"]) def seafile_check_passwd(repo_id, magic): pass check_passwd = seafile_check_passwd @searpc_func("int", ["string", "string", "string"]) def seafile_set_passwd(repo_id, user, passwd): pass set_passwd = seafile_set_passwd @searpc_func("int", ["string", "string"]) def seafile_unset_passwd(repo_id, user): pass unset_passwd = seafile_unset_passwd # repo permission checking @searpc_func("string", ["string", "string"]) def check_permission(repo_id, user): pass # folder permission check @searpc_func("string", ["string", "string", "string"]) def check_permission_by_path(repo_id, path, user): pass # org repo @searpc_func("string", ["string", "string", "string", "string", "string", "int", "int"]) def seafile_create_org_repo(name, desc, user, passwd, magic, random_key, enc_version, org_id): pass create_org_repo = seafile_create_org_repo @searpc_func("int", ["string"]) def seafile_get_org_id_by_repo_id(repo_id): pass get_org_id_by_repo_id = seafile_get_org_id_by_repo_id @searpc_func("objlist", ["int", "int", "int"]) def seafile_get_org_repo_list(org_id, start, limit): pass get_org_repo_list = seafile_get_org_repo_list @searpc_func("int", ["int"]) def seafile_remove_org_repo_by_org_id(org_id): pass remove_org_repo_by_org_id = seafile_remove_org_repo_by_org_id @searpc_func("objlist", ["int", "string"]) def list_org_repos_by_owner(org_id, user): pass @searpc_func("string", ["string"]) def get_org_repo_owner(repo_id): pass # org group repo @searpc_func("int", ["string", "int", "int", "string", "string"]) def add_org_group_repo(repo_id, org_id, group_id, owner, permission): pass @searpc_func("int", ["string", "int", "int"]) def del_org_group_repo(repo_id, org_id, group_id): pass @searpc_func("string", ["int", "int"]) def get_org_group_repoids(org_id, group_id): pass 
@searpc_func("string", ["int", "int", "string"]) def get_org_group_repo_owner(org_id, group_id, repo_id): pass @searpc_func("objlist", ["int", "string"]) def get_org_group_repos_by_owner(org_id, user): pass @searpc_func("string", ["int", "string"]) def get_org_groups_by_repo(org_id, repo_id): pass @searpc_func("int", ["int", "int", "string", "string"]) def set_org_group_repo_permission(org_id, group_id, repo_id, permission): pass # inner pub repo @searpc_func("int", ["string", "string"]) def set_inner_pub_repo(repo_id, permission): pass @searpc_func("int", ["string"]) def unset_inner_pub_repo(repo_id): pass @searpc_func("objlist", []) def list_inner_pub_repos(): pass @searpc_func("objlist", ["string"]) def list_inner_pub_repos_by_owner(user): pass @searpc_func("int64", []) def count_inner_pub_repos(): pass @searpc_func("int", ["string"]) def is_inner_pub_repo(repo_id): pass # org inner pub repo @searpc_func("int", ["int", "string", "string"]) def set_org_inner_pub_repo(org_id, repo_id, permission): pass @searpc_func("int", ["int", "string"]) def unset_org_inner_pub_repo(org_id, repo_id): pass @searpc_func("objlist", ["int"]) def list_org_inner_pub_repos(org_id): pass @searpc_func("objlist", ["int", "string"]) def list_org_inner_pub_repos_by_owner(org_id, user): pass @searpc_func("int", ["string", "int"]) def set_repo_history_limit(repo_id, days): pass @searpc_func("int", ["string"]) def get_repo_history_limit(repo_id): pass @searpc_func("int", ["string", "int64"]) def set_repo_valid_since(repo_id, timestamp): pass # virtual repo @searpc_func("string", ["string", "string", "string", "string", "string", "string"]) def create_virtual_repo(origin_repo_id, path, repo_name, repo_desc, owner, passwd=''): pass @searpc_func("objlist", ["string"]) def get_virtual_repos_by_owner(owner): pass @searpc_func("object", ["string", "string", "string"]) def get_virtual_repo(origin_repo, path, owner): pass # system default library @searpc_func("string", []) def 
get_system_default_repo_id(): pass # Change password @searpc_func("int", ["string", "string", "string", "string"]) def seafile_change_repo_passwd(repo_id, old_passwd, new_passwd, user): pass change_repo_passwd = seafile_change_repo_passwd # Upgrade repo enc algorithm @searpc_func("int", ["string", "string", "string", "string", "string"]) def seafile_upgrade_repo_pwd_hash_algorithm (repo_id, user, passwd, pwd_hash_algo, pwd_hash_params): pass upgrade_repo_pwd_hash_algorithm = seafile_upgrade_repo_pwd_hash_algorithm # Clean trash @searpc_func("int", ["string", "int"]) def clean_up_repo_history(repo_id, keep_days): pass # Trashed repos @searpc_func("objlist", ["int", "int"]) def get_trash_repo_list(start, limit): pass @searpc_func("int", ["string"]) def del_repo_from_trash(repo_id): pass @searpc_func("int", ["string"]) def restore_repo_from_trash(repo_id): pass @searpc_func("objlist", ["string"]) def get_trash_repos_by_owner(owner): pass @searpc_func("int", []) def empty_repo_trash(): pass @searpc_func("int", ["string"]) def empty_repo_trash_by_owner(owner): pass @searpc_func("object", ["string"]) def empty_repo_trash_by_owner(owner): pass @searpc_func("object", ["int", "string", "string"]) def generate_magic_and_random_key(enc_version, repo_id, password): pass @searpc_func("int64", []) def get_total_file_number(): pass @searpc_func("int64", []) def get_total_storage(): pass @searpc_func("object", ["string", "string"]) def get_file_count_info_by_path(repo_id, path): pass @searpc_func("string", ["string"]) def get_trash_repo_owner(repo_id): pass @searpc_func("int64", ["string", "string"]) def seafile_get_upload_tmp_file_offset(repo_id, file_path): pass get_upload_tmp_file_offset = seafile_get_upload_tmp_file_offset @searpc_func("int", ["string", "string", "string", "string"]) def seafile_mkdir_with_parents (repo_id, parent_dir, relative_path, username): pass mkdir_with_parents = seafile_mkdir_with_parents @searpc_func("int", ["string", "string"]) def 
get_server_config_int (group, key): pass @searpc_func("int", ["string", "string", "int"]) def set_server_config_int (group, key, value): pass @searpc_func("int64", ["string", "string"]) def get_server_config_int64 (group, key): pass @searpc_func("int", ["string", "string", "int64"]) def set_server_config_int64 (group, key, value): pass @searpc_func("string", ["string", "string"]) def get_server_config_string (group, key): pass @searpc_func("int", ["string", "string", "string"]) def set_server_config_string (group, key, value): pass @searpc_func("int", ["string", "string"]) def get_server_config_boolean (group, key): pass @searpc_func("int", ["string", "string", "int"]) def set_server_config_boolean (group, key, value): pass @searpc_func("int", ["string", "int"]) def repo_has_been_shared (repo_id, including_groups): pass @searpc_func("objlist", ["string"]) def get_shared_users_by_repo (repo_id): pass @searpc_func("objlist", ["int", "string"]) def org_get_shared_users_by_repo (org_id, repo_id): pass @searpc_func("string", ["string", "string", "string", "int"]) def convert_repo_path(repo_id, path, user, is_org): pass # repo status @searpc_func("int", ["string", "int"]) def set_repo_status(repo_id, status): pass @searpc_func("int", ["string"]) def get_repo_status(repo_id): pass # token for web access to repo @searpc_func("string", ["string", "string", "string", "string", "int"]) def seafile_web_get_access_token(repo_id, obj_id, op, username, use_onetime=1): pass web_get_access_token = seafile_web_get_access_token @searpc_func("object", ["string"]) def seafile_web_query_access_token(token): pass web_query_access_token = seafile_web_query_access_token @searpc_func("string", ["string"]) def seafile_query_zip_progress(token): pass query_zip_progress = seafile_query_zip_progress @searpc_func("int", ["string"]) def cancel_zip_task(token): pass ###### GC #################### @searpc_func("int", []) def seafile_gc(): pass gc = seafile_gc @searpc_func("int", []) def 
seafile_gc_get_progress(): pass gc_get_progress = seafile_gc_get_progress # password management @searpc_func("int", ["string", "string"]) def seafile_is_passwd_set(repo_id, user): pass is_passwd_set = seafile_is_passwd_set @searpc_func("object", ["string", "string"]) def seafile_get_decrypt_key(repo_id, user): pass get_decrypt_key = seafile_get_decrypt_key # Copy tasks @searpc_func("object", ["string"]) def get_copy_task(task_id): pass @searpc_func("int", ["string"]) def cancel_copy_task(task_id): pass # event @searpc_func("int", ["string", "string"]) def publish_event(channel, content): pass @searpc_func("json", ["string"]) def pop_event(channel): pass @searpc_func("objlist", ["string", "string"]) def search_files(self, repo_id, search_str): pass @searpc_func("objlist", ["string", "string", "string"]) def search_files_by_path(self, repo_id, path, search_str): pass #user management @searpc_func("int", ["string", "string", "int", "int"]) def add_emailuser(self, email, passwd, is_staff, is_active): pass @searpc_func("int", ["string", "string"]) def remove_emailuser(self, source, email): pass @searpc_func("int", ["string", "string"]) def validate_emailuser(self, email, passwd): pass @searpc_func("object", ["string"]) def get_emailuser(self, email): pass @searpc_func("object", ["string"]) def get_emailuser_with_import(self, email): pass @searpc_func("object", ["int"]) def get_emailuser_by_id(self, user_id): pass @searpc_func("objlist", ["string", "int", "int", "string"]) def get_emailusers(self, source, start, limit, status): pass @searpc_func("objlist", ["string", "string", "int", "int"]) def search_emailusers(self, source, email_patt, start, limit): pass @searpc_func("int64", ["string"]) def count_emailusers(self, source): pass @searpc_func("int64", ["string"]) def count_inactive_emailusers(self, source): pass @searpc_func("objlist", ["string"]) def filter_emailusers_by_emails(self): pass @searpc_func("int", ["string", "int", "string", "int", "int"]) def 
update_emailuser(self, source, user_id, password, is_staff, is_active): pass @searpc_func("int", ["string", "string"]) def update_role_emailuser(self, email, role): pass @searpc_func("objlist", []) def get_superusers(self): pass @searpc_func("objlist", ["string", "string"]) def get_emailusers_in_list(self, source, user_list): pass @searpc_func("int", ["string", "string"]) def update_emailuser_id (self, old_email, new_email): pass #group management @searpc_func("int", ["string", "string", "string", "int"]) def create_group(self, group_name, user_name, gtype, parent_group_id): pass @searpc_func("int", ["int", "string", "string", "int"]) def create_org_group(self, org_id, group_name, user_name, parent_group_id): pass @searpc_func("int", ["int"]) def remove_group(self, group_id): pass @searpc_func("int", ["int", "string", "string"]) def group_add_member(self, group_id, user_name, member_name): pass @searpc_func("int", ["int", "string", "string"]) def group_remove_member(self, group_id, user_name, member_name): pass @searpc_func("int", ["int", "string"]) def group_set_admin(self, group_id, member_name): pass @searpc_func("int", ["int", "string"]) def group_unset_admin(self, group_id, member_name): pass @searpc_func("int", ["int", "string"]) def set_group_name(self, group_id, group_name): pass @searpc_func("int", ["int", "string"]) def quit_group(self, group_id, user_name): pass @searpc_func("objlist", ["string", "int"]) def get_groups(self, user_name, return_ancestors): pass @searpc_func("objlist", []) def list_all_departments(self): pass @searpc_func("objlist", ["int", "int", "string"]) def get_all_groups(self, start, limit, source): pass @searpc_func("objlist", ["int"]) def get_ancestor_groups(self, group_id): pass @searpc_func("objlist", ["int"]) def get_top_groups(self, including_org): pass @searpc_func("objlist", ["int"]) def get_child_groups(self, group_id): pass @searpc_func("objlist", ["int"]) def get_descendants_groups(self, group_id): pass 
@searpc_func("object", ["int"]) def get_group(self, group_id): pass @searpc_func("objlist", ["int"]) def get_group_members(self, group_id): pass @searpc_func("objlist", ["int", "string"]) def get_members_with_prefix(self, group_id, prefix): pass @searpc_func("int", ["int", "string", "int"]) def check_group_staff(self, group_id, username, in_structure): pass @searpc_func("int", ["string"]) def remove_group_user(self, username): pass @searpc_func("int", ["int", "string", "int"]) def is_group_user(self, group_id, user, in_structure): pass @searpc_func("int", ["int", "string"]) def set_group_creator(self, group_id, user_name): pass @searpc_func("objlist", ["string", "int", "int"]) def search_groups(self, group_patt, start, limit): pass @searpc_func("objlist", ["int", "string"]) def search_group_members(self, group_id, pattern): pass @searpc_func("objlist", ["string"]) def get_groups_members(self, group_ids): pass #org management @searpc_func("int", ["string", "string", "string"]) def create_org(self, org_name, url_prefix, creator): pass @searpc_func("int", ["int"]) def remove_org(self, org_id): pass @searpc_func("objlist", ["int", "int"]) def get_all_orgs(self, start, limit): pass @searpc_func("int64", []) def count_orgs(self): pass @searpc_func("object", ["string"]) def get_org_by_url_prefix(self, url_prefix): pass @searpc_func("object", ["string"]) def get_org_by_id(self, org_id): pass @searpc_func("int", ["int", "string", "int"]) def add_org_user(self, org_id, email, is_staff): pass @searpc_func("int", ["int", "string"]) def remove_org_user(self, org_id, email): pass @searpc_func("objlist", ["string"]) def get_orgs_by_user(self, email): pass @searpc_func("objlist", ["string", "int", "int"]) def get_org_emailusers(self, url_prefix, start, limit): pass @searpc_func("int", ["int", "int"]) def add_org_group(self, org_id, group_id): pass @searpc_func("int", ["int", "int"]) def remove_org_group(self, org_id, group_id): pass @searpc_func("int", ["int"]) def 
is_org_group(self, group_id): pass @searpc_func("int", ["int"]) def get_org_id_by_group(self, group_id): pass @searpc_func("objlist", ["int", "int", "int"]) def get_org_groups(self, org_id, start, limit): pass @searpc_func("objlist", ["string", "int"]) def get_org_groups_by_user (self, user, org_id): pass @searpc_func("objlist", ["int"]) def get_org_top_groups(self, org_id): pass @searpc_func("int", ["int", "string"]) def org_user_exists(self, org_id, email): pass @searpc_func("int", ["int", "string"]) def is_org_staff(self, org_id, user): pass @searpc_func("int", ["int", "string"]) def set_org_staff(self, org_id, user): pass @searpc_func("int", ["int", "string"]) def unset_org_staff(self, org_id, user): pass @searpc_func("int", ["int", "string"]) def set_org_name(self, org_id, org_name): pass @searpc_func("string", ["string"]) def get_primary_id(self, email): pass ================================================ FILE: python/seaserv/Makefile.am ================================================ seaservdir=${pyexecdir}/seaserv seaserv_PYTHON = __init__.py service.py api.py ================================================ FILE: python/seaserv/__init__.py ================================================ from . 
import service
from .service import seafserv_threaded_rpc, ccnet_threaded_rpc
from .service import send_command, check_quota, web_get_access_token, \
    unset_repo_passwd, get_user_quota_usage, get_user_share_usage, \
    get_user_quota
from .service import get_emailusers, count_emailusers, \
    get_emailuser_with_import
from .service import get_org_groups, get_personal_groups_by_user, \
    get_group_repoids, get_personal_groups, list_share_repos, remove_share, \
    check_group_staff, remove_group_user, get_group, get_org_id_by_group, \
    get_group_members, get_shared_groups_by_repo, is_group_user, \
    get_org_group_repos, get_group_repos, get_org_groups_by_user, is_org_group,\
    del_org_group_repo, get_org_groups_by_repo, get_org_group_repoids, \
    get_group_repos_by_owner, unshare_group_repo
from .service import get_repos, get_repo, get_commits, get_branches, remove_repo, \
    get_org_repos, is_repo_owner, create_org_repo, is_inner_pub_repo, \
    list_org_inner_pub_repos, get_org_id_by_repo_id, list_org_shared_repos, \
    list_personal_shared_repos, is_personal_repo, list_inner_pub_repos, \
    is_org_repo_owner, get_org_repo_owner, is_org_repo, get_file_size,\
    list_personal_repos_by_owner, get_repo_token_nonnull, get_repo_owner, \
    server_repo_size, get_file_id_by_path, get_commit, set_repo_history_limit,\
    get_repo_history_limit, list_inner_pub_repos_by_owner, unset_inner_pub_repo,\
    count_inner_pub_repos, edit_repo, list_dir_by_path, create_repo, remove_repo
from .service import get_binding_peerids, is_valid_filename, check_permission,\
    is_passwd_set
from .service import create_org, get_orgs_by_user, get_org_by_url_prefix, \
    get_user_current_org, add_org_user, remove_org_user, get_org_by_id, \
    get_org_id_by_repo_id, is_org_staff, get_org_users_by_url_prefix, \
    org_user_exists, list_org_repos_by_owner
from .service import get_related_users_by_repo, get_related_users_by_org_repo
from .service import post_empty_file, del_file
from .service import \
    MAX_UPLOAD_FILE_SIZE, MAX_DOWNLOAD_DIR_SIZE, FILE_SERVER_ROOT, \
    CALC_SHARE_USAGE, FILE_SERVER_PORT, \
    SEAFILE_CENTRAL_CONF_DIR, USE_GO_FILESERVER
from .service import send_message
from .api import seafile_api, ccnet_api
================================================ FILE: python/seaserv/api.py ================================================
from .service import seafserv_threaded_rpc, ccnet_threaded_rpc
from pysearpc import SearpcError
import json

"""
General rules for return values and exception handling of Seafile python API:
- Read operations return corresponding values. Raises exceptions on parameter
  errors or I/O errors in seaf-server.
- Write or set operations return 0 on success, -1 on error. On error, an
  exception will be raised.

All paths in parameters can be in absolute path format (like '/test') or
relative path format (like 'test'). The API can handle both formats.
"""

REPO_STATUS_NORMAL = 0
REPO_STATUS_READ_ONLY = 1


class SeafileAPI(object):

    def __init__(self):
        pass

    # fileserver token

    def get_fileserver_access_token(self, repo_id, obj_id, op, username, use_onetime=True):
        """Generate token for access file/dir in fileserver

        op: the operation, can be 'view', 'download', 'download-dir',
            'downloadblks', 'upload', 'update', 'upload-blks-api',
            'upload-blks-aj', 'update-blks-api', 'update-blks-aj'
        Return: the access token in string
        """
        # RPC expects an int flag, not a Python bool.
        onetime = 1 if bool(use_onetime) else 0
        return seafserv_threaded_rpc.web_get_access_token(repo_id, obj_id, op,
                                                          username, onetime)

    def query_fileserver_access_token(self, token):
        """Get the WebAccess object

        token: the access token in string
        Return: the WebAccess object (lib/webaccess.vala)
        """
        return seafserv_threaded_rpc.web_query_access_token(token)

    def query_zip_progress(self, token):
        """Query zip progress for download-dir, download-multi

        token: obtained by get_fileserver_access_token
        Return: json formatted string `{"zipped":, "total":}`, otherwise None.
        """
        return seafserv_threaded_rpc.query_zip_progress(token)

    def cancel_zip_task(self, token):
        return seafserv_threaded_rpc.cancel_zip_task(token)

    # password

    def is_password_set(self, repo_id, username):
        """
        Return non-zero if True, otherwise 0.
        """
        return seafserv_threaded_rpc.is_passwd_set(repo_id, username)

    def get_decrypt_key(self, repo_id, username):
        """
        Return: a CryptKey object (lib/crypt.vala)
        """
        return seafserv_threaded_rpc.get_decrypt_key(repo_id, username)

    def change_repo_passwd(self, repo_id, old_passwd, new_passwd, user):
        return seafserv_threaded_rpc.change_repo_passwd(repo_id, old_passwd,
                                                        new_passwd, user)

    def upgrade_repo_pwd_hash_algorithm (self, repo_id, user, passwd, pwd_hash_algo, pwd_hash_params):
        return seafserv_threaded_rpc.upgrade_repo_pwd_hash_algorithm (repo_id, user, passwd,
                                                                      pwd_hash_algo, pwd_hash_params)

    def check_passwd(self, repo_id, magic):
        return seafserv_threaded_rpc.check_passwd(repo_id, magic)

    def set_passwd(self, repo_id, user, passwd):
        return seafserv_threaded_rpc.set_passwd(repo_id, user, passwd)

    def unset_passwd(self, repo_id, user):
        return seafserv_threaded_rpc.unset_passwd(repo_id, user)

    def generate_magic_and_random_key(self, enc_version, repo_id, password):
        return seafserv_threaded_rpc.generate_magic_and_random_key(enc_version, repo_id, password)

    # repo manipulation

    def create_repo(self, name, desc, username, passwd=None, enc_version=2,
                    storage_id=None, pwd_hash_algo=None, pwd_hash_params=None):
        # NOTE(review): storage_id is accepted but not forwarded to the RPC
        # call below -- confirm whether this is intentional in CE.
        return seafserv_threaded_rpc.create_repo(name, desc, username, passwd,
                                                 enc_version, pwd_hash_algo,
                                                 pwd_hash_params)

    def create_enc_repo(self, repo_id, name, desc, username, magic, random_key,
                        salt, enc_version, pwd_hash=None, pwd_hash_algo=None,
                        pwd_hash_params=None):
        return seafserv_threaded_rpc.create_enc_repo(repo_id, name, desc, username,
                                                     magic, random_key, salt,
                                                     enc_version, pwd_hash,
                                                     pwd_hash_algo, pwd_hash_params)

    def get_repos_by_id_prefix(self, id_prefix, start=-1, limit=-1):
        """
        Return: a list of Repo objects
        """
        return seafserv_threaded_rpc.get_repos_by_id_prefix(id_prefix, start, limit)

    def get_repo(self, repo_id):
        """
        Return: a Repo object (lib/repo.vala)
        """
        return seafserv_threaded_rpc.get_repo(repo_id)

    def remove_repo(self, repo_id):
        return seafserv_threaded_rpc.remove_repo(repo_id)

    def get_repo_list(self, start, limit, order_by=None, ret_virt_repo=False):
        """
        Return: a list of Repo objects (lib/repo.vala)
        """
        return seafserv_threaded_rpc.get_repo_list(start, limit, order_by,
                                                   1 if ret_virt_repo else 0)

    def count_repos(self):
        return seafserv_threaded_rpc.count_repos()

    def edit_repo(self, repo_id, name, description, username):
        return seafserv_threaded_rpc.edit_repo(repo_id, name, description, username)

    def is_repo_owner(self, username, repo_id):
        """
        Return 1 if True, otherwise 0.
        """
        return seafserv_threaded_rpc.is_repo_owner(username, repo_id)

    def set_repo_owner(self, email, repo_id):
        return seafserv_threaded_rpc.set_repo_owner(email, repo_id)

    def get_repo_owner(self, repo_id):
        """
        Return: repo owner in string
        """
        return seafserv_threaded_rpc.get_repo_owner(repo_id)

    def get_owned_repo_list(self, username, ret_corrupted=False, start=-1, limit=-1):
        """
        Return: a list of Repo objects
        """
        return seafserv_threaded_rpc.list_owned_repos(username,
                                                      1 if ret_corrupted else 0,
                                                      start, limit)

    def search_repos_by_name(self, name):
        return seafserv_threaded_rpc.search_repos_by_name(name)

    def get_orphan_repo_list(self):
        return seafserv_threaded_rpc.get_orphan_repo_list()

    def get_repo_size(self, repo_id):
        return seafserv_threaded_rpc.server_repo_size(repo_id)

    def revert_repo(self, repo_id, commit_id, username):
        return seafserv_threaded_rpc.revert_on_server(repo_id, commit_id, username)

    def diff_commits(self, repo_id, old_commit, new_commit, fold_dir_diff = 1):
        """
        Return: a list of DiffEntry objects (lib/repo.vala)
        """
        return seafserv_threaded_rpc.get_diff(repo_id, old_commit, new_commit,
                                              fold_dir_diff)

    def get_commit_list(self, repo_id, offset, limit):
        """
        Return: a list of Commit objects (lib/commit.vala)
        """
        return seafserv_threaded_rpc.get_commit_list(repo_id, offset, limit)

    def get_commit(self, repo_id, repo_version, cmt_id):
        """
        Get a commit.
        """
        # A failed lookup is reported as None instead of propagating the
        # SearpcError to the caller.
        try:
            ret = seafserv_threaded_rpc.get_commit(repo_id, repo_version, cmt_id)
        except SearpcError:
            ret = None
        return ret

    def get_system_default_repo_id (self):
        return seafserv_threaded_rpc.get_system_default_repo_id()

    def get_org_id_by_repo_id (self, repo_id):
        return seafserv_threaded_rpc.get_org_id_by_repo_id(repo_id)

    def set_repo_status (self, repo_id, status):
        return seafserv_threaded_rpc.set_repo_status(repo_id, status)

    def get_repo_status (self, repo_id):
        return seafserv_threaded_rpc.get_repo_status(repo_id)

    # File property and dir listing

    def is_valid_filename(self, repo_id, filename):
        """
        Return: 0 on invalid; 1 on valid.
        """
        return seafserv_threaded_rpc.is_valid_filename(repo_id, filename)

    def get_file_size(self, store_id, version, file_id):
        return seafserv_threaded_rpc.get_file_size(store_id, version, file_id)

    def get_dir_size(self, store_id, version, dir_id):
        """
        Return the size of a dir. It needs to recursively calculate the size
        of the dir. It can cause great delay before returning. Use with caution!
        """
        return seafserv_threaded_rpc.get_dir_size(store_id, version, dir_id)

    def get_file_id_by_path(self, repo_id, path):
        """
        Returns None if path not found. Only raise exception on parameter or
        IO error.
        """
        return seafserv_threaded_rpc.get_file_id_by_path(repo_id, path)

    def get_file_id_by_commit_and_path(self, repo_id, commit_id, path):
        return seafserv_threaded_rpc.get_file_id_by_commit_and_path(repo_id,
                                                                    commit_id, path)

    def get_dirent_by_path(self, repo_id, path):
        """
        Return: a Dirent object (lib/dirent.vala)
        """
        return seafserv_threaded_rpc.get_dirent_by_path(repo_id, path)

    def list_file_by_file_id(self, repo_id, file_id, offset=-1, limit=-1):
        # deprecated, use list_blocks_by_file_id instead.
        return seafserv_threaded_rpc.list_file_blocks(repo_id, file_id, offset, limit)

    def list_blocks_by_file_id(self, repo_id, file_id, offset=-1, limit=-1):
        """
        list block ids of a file.
        Return: a string containing block list. Each id is seperated by '\n'
        """
        return seafserv_threaded_rpc.list_file_blocks(repo_id, file_id, offset, limit)

    def get_dir_id_by_path(self, repo_id, path):
        return seafserv_threaded_rpc.get_dir_id_by_path(repo_id, path)

    def list_dir_by_dir_id(self, repo_id, dir_id, offset=-1, limit=-1):
        """
        Return: a list of Dirent objects. The objects are sorted as follows:
        - Directories are always before files
        - Entries are sorted by names in ascending order
        """
        return seafserv_threaded_rpc.list_dir(repo_id, dir_id, offset, limit)

    def list_dir_by_path(self, repo_id, path, offset=-1, limit=-1):
        # Resolve the path to a dir id first; None means the path was not found.
        dir_id = seafserv_threaded_rpc.get_dir_id_by_path(repo_id, path)
        if dir_id is None:
            return None
        return seafserv_threaded_rpc.list_dir(repo_id, dir_id, offset, limit)

    def list_dir_by_commit_and_path(self, repo_id, commit_id, path, offset=-1, limit=-1):
        dir_id = seafserv_threaded_rpc.get_dir_id_by_commit_and_path(repo_id, commit_id, path)
        if dir_id is None:
            return None
        return seafserv_threaded_rpc.list_dir(repo_id, dir_id, offset, limit)

    def get_dir_id_by_commit_and_path(self, repo_id, commit_id, path):
        return seafserv_threaded_rpc.get_dir_id_by_commit_and_path(repo_id, commit_id, path)

    def list_dir_with_perm(self, repo_id, dir_path, dir_id, user, offset=-1, limit=-1):
        return seafserv_threaded_rpc.list_dir_with_perm (repo_id, dir_path, dir_id,
                                                         user, offset, limit)

    def mkdir_with_parents (self, repo_id, parent_dir, relative_path, username):
        return seafserv_threaded_rpc.mkdir_with_parents(repo_id, parent_dir,
                                                        relative_path, username)

    def get_file_count_info_by_path(self, repo_id, path):
        return seafserv_threaded_rpc.get_file_count_info_by_path(repo_id, path)

    def get_total_storage (self):
        return seafserv_threaded_rpc.get_total_storage()

    def get_total_file_number (self):
        return \
seafserv_threaded_rpc.get_total_file_number()

    # file/dir operations

    def post_file(self, repo_id, tmp_file_path, parent_dir, filename, username):
        """Add a file to a directory"""
        return seafserv_threaded_rpc.post_file(repo_id, tmp_file_path, parent_dir,
                                               filename, username)

    def post_empty_file(self, repo_id, parent_dir, filename, username):
        return seafserv_threaded_rpc.post_empty_file(repo_id, parent_dir,
                                                     filename, username)

    def put_file(self, repo_id, tmp_file_path, parent_dir, filename, username, head_id):
        """Update an existing file

        head_id: the original commit id of the old file
        """
        return seafserv_threaded_rpc.put_file(repo_id, tmp_file_path, parent_dir,
                                              filename, username, head_id)

    '''
    If you want to delete multiple files in a batch, @filename should be
    json array
    '''
    def del_file(self, repo_id, parent_dir, filename, username):
        return seafserv_threaded_rpc.del_file(repo_id, parent_dir, filename, username)

    def batch_del_files(self, repo_id, filepaths, username):
        return seafserv_threaded_rpc.batch_del_files(repo_id, filepaths, username)

    '''
    If you want to move or copy multiple files in a batch, @src_filename and
    @dst_filename should be json array, make sure the number of files in
    @src_filename and @dst_filename parameters match
    '''
    def copy_file(self, src_repo, src_dir, src_filename, dst_repo, dst_dir,
                  dst_filename, username, need_progress, synchronous=0):
        return seafserv_threaded_rpc.copy_file(src_repo, src_dir, src_filename,
                                               dst_repo, dst_dir, dst_filename,
                                               username, need_progress, synchronous)

    def move_file(self, src_repo, src_dir, src_filename, dst_repo, dst_dir,
                  dst_filename, replace, username, need_progress, synchronous=0):
        return seafserv_threaded_rpc.move_file(src_repo, src_dir, src_filename,
                                               dst_repo, dst_dir, dst_filename,
                                               replace, username, need_progress,
                                               synchronous)

    def get_copy_task(self, task_id):
        return seafserv_threaded_rpc.get_copy_task(task_id)

    def cancel_copy_task(self, task_id):
        return seafserv_threaded_rpc.cancel_copy_task(task_id)

    def rename_file(self, repo_id, parent_dir, oldname, newname, username):
        return seafserv_threaded_rpc.rename_file(repo_id, parent_dir, oldname,
                                                 newname, username)

    def post_dir(self, repo_id, parent_dir, dirname, username):
        """Add a directory"""
        return seafserv_threaded_rpc.post_dir(repo_id, parent_dir, dirname, username)

    def revert_file(self, repo_id, commit_id, path, username):
        return seafserv_threaded_rpc.revert_file(repo_id, commit_id, path, username)

    def revert_dir(self, repo_id, commit_id, path, username):
        return seafserv_threaded_rpc.revert_dir(repo_id, commit_id, path, username)

    def get_deleted(self, repo_id, show_days, path='/', scan_stat=None, limit=100):
        """
        Get list of deleted paths.

        @show_days: return deleted path in the last @show_days
        @path: return deleted files under this path. The path will be
               recursively traversed.
        @scan_stat: An opaque status returned by the last call. In the first
                    call, None must be passed. The last entry of the result
                    list contains a 'scan_stat' attribute. In the next call,
                    pass in the returned 'scan_stat'.
        @limit: Advisory maximum number of commits to traverse. Sometimes
                more than @limit commits will be traversed.

        Return a list of DeletedEntry objects (lib/repo.vala).
        If no more deleted entries can be returned within the given time frame
        (specified by @show_days) or all deleted entries in the history have
        been returned, a list with a single entry will be returned. The
        'scan_stat' attribute of this entry is set to None.
        """
        return seafserv_threaded_rpc.get_deleted(repo_id, show_days, path,
                                                 scan_stat, limit)

    def get_file_revisions(self, repo_id, commit_id, path, limit):
        """
        Get revisions of a file.

        @commit_id: start traversing from this commit
        @limit: maximum number of commits to traverse when looking for
                revisions

        Return a list of Commit objects (lib/commit.vala) related to the
        revisions. A few special attributes are added to the commit object:
        @rev_file_id: id of the file revision
        @rev_file_size: size of the file revision
        @rev_renamed_old_path: set if this revision is made by a rename
                               operation. It's set to the old path before
                               rename.
        @next_start_commit: commit_id for next page. An extra commit which
                            only contains @next_start_commit will be appended
                            to the list.
        """
        return seafserv_threaded_rpc.list_file_revisions(repo_id, commit_id,
                                                         path, limit)

    # This api is slow and should only be used for version 0 repos.
    def get_files_last_modified(self, repo_id, parent_dir, limit):
        """Get last modification time for files in a dir

        limit: the max number of commits to analyze
        """
        return seafserv_threaded_rpc.calc_files_last_modified(repo_id,
                                                              parent_dir, limit)

    def get_repo_history_limit(self, repo_id):
        """
        Return repo history limit in days. Returns -1 if it's unlimited.
        """
        return seafserv_threaded_rpc.get_repo_history_limit(repo_id)

    def set_repo_history_limit(self, repo_id, days):
        """
        Set repo history limit in days. Pass -1 if set to unlimited.
        """
        return seafserv_threaded_rpc.set_repo_history_limit(repo_id, days)

    def set_repo_valid_since(self, repo_id, timestamp):
        return seafserv_threaded_rpc.set_repo_valid_since(repo_id, timestamp)

    def check_repo_blocks_missing(self, repo_id, blklist):
        return seafserv_threaded_rpc.check_repo_blocks_missing(repo_id, blklist)

    def get_upload_tmp_file_offset (self, repo_id, file_path):
        return seafserv_threaded_rpc.get_upload_tmp_file_offset (repo_id, file_path)

    # file lock

    def check_file_lock(self, repo_id, path, user):
        """
        Always return 0 since CE doesn't support file locking.
        """
        return 0

    # share repo to user

    def share_repo(self, repo_id, from_username, to_username, permission):
        return seafserv_threaded_rpc.add_share(repo_id, from_username,
                                               to_username, permission)

    def remove_share(self, repo_id, from_username, to_username):
        return seafserv_threaded_rpc.remove_share(repo_id, from_username,
                                                  to_username)

    def set_share_permission(self, repo_id, from_username, to_username, permission):
        return seafserv_threaded_rpc.set_share_permission(repo_id, from_username,
                                                          to_username, permission)

    def share_subdir_to_user(self, repo_id, path, owner, share_user, permission, passwd=''):
        return seafserv_threaded_rpc.share_subdir_to_user(repo_id, path, owner,
                                                          share_user, permission,
                                                          passwd)

    def unshare_subdir_for_user(self, repo_id, path, owner, share_user):
        return seafserv_threaded_rpc.unshare_subdir_for_user(repo_id, path,
                                                             owner, share_user)

    def update_share_subdir_perm_for_user(self, repo_id, path, owner,
                                          share_user, permission):
        return seafserv_threaded_rpc.update_share_subdir_perm_for_user(repo_id, path, owner,
                                                                       share_user, permission)

    def get_shared_repo_by_path(self, repo_id, path, shared_to, is_org=False):
        """
        If path is NULL, 'repo_id' represents for the repo we want,
        otherwise, 'repo_id' represents for the origin repo, return virtual repo
        """
        return seafserv_threaded_rpc.get_shared_repo_by_path(repo_id, path,
                                                             shared_to,
                                                             1 if is_org else 0)

    def get_share_out_repo_list(self, username, start, limit):
        """
        Get repo list shared by this user.
        Return: a list of Repo objects
        """
        return seafserv_threaded_rpc.list_share_repos(username, "from_email",
                                                      start, limit)

    def get_share_in_repo_list(self, username, start, limit):
        """
        Get repo list shared to this user.
        """
        return seafserv_threaded_rpc.list_share_repos(username, "to_email",
                                                      start, limit)

    def list_repo_shared_to(self, from_user, repo_id):
        """
        Get user list this repo is shared to.
        Return: a list of SharedUser objects (lib/repo.vala)
        """
        return seafserv_threaded_rpc.list_repo_shared_to(from_user, repo_id)

    def repo_has_been_shared(self, repo_id, including_groups=False):
        return True if seafserv_threaded_rpc.repo_has_been_shared(repo_id, 1 if including_groups else 0) else False

    # share repo to group

    def group_share_repo(self, repo_id, group_id, username, permission):
        # deprecated, use ``set_group_repo``
        return seafserv_threaded_rpc.group_share_repo(repo_id, group_id,
                                                      username, permission)

    def set_group_repo(self, repo_id, group_id, username, permission):
        return seafserv_threaded_rpc.group_share_repo(repo_id, group_id,
                                                      username, permission)

    def group_unshare_repo(self, repo_id, group_id, username):
        # deprecated, use ``unset_group_repo``
        return seafserv_threaded_rpc.group_unshare_repo(repo_id, group_id, username)

    def unset_group_repo(self, repo_id, group_id, username):
        return seafserv_threaded_rpc.group_unshare_repo(repo_id, group_id, username)

    def get_shared_group_ids_by_repo(self, repo_id):
        # The RPC returns group ids as one '\n'-separated string; split it
        # into a list, dropping empty fragments.
        group_ids = seafserv_threaded_rpc.get_shared_groups_by_repo(repo_id)
        if not group_ids:
            return []
        ret = []
        for group_id in group_ids.split('\n'):
            if not group_id:
                continue
            ret.append(group_id)
        return ret

    def list_repo_shared_group(self, from_user, repo_id):
        # deprecated, use list_repo_shared_group_by_user instead.
        return seafserv_threaded_rpc.list_repo_shared_group(from_user, repo_id)

    def get_group_shared_repo_by_path (self, repo_id, path, group_id, is_org=False):
        """
        If path is NULL, 'repo_id' represents for the repo we want,
        otherwise, 'repo_id' represents for the origin repo, return virtual repo
        """
        return seafserv_threaded_rpc.get_group_shared_repo_by_path(repo_id, path,
                                                                   group_id,
                                                                   1 if is_org else 0)

    def get_group_repos_by_user (self, user):
        """
        Return all the repos in all groups that the @user belongs to.
        """
        return seafserv_threaded_rpc.get_group_repos_by_user(user)

    def get_org_group_repos_by_user (self, user, org_id):
        return seafserv_threaded_rpc.get_org_group_repos_by_user(user, org_id)

    def list_repo_shared_group_by_user(self, from_user, repo_id):
        """
        Return: a list of SharedGroup objects (lib/repo.vala)
        """
        return seafserv_threaded_rpc.list_repo_shared_group(from_user, repo_id)

    def share_subdir_to_group(self, repo_id, path, owner, share_group, permission, passwd=''):
        return seafserv_threaded_rpc.share_subdir_to_group(repo_id, path, owner,
                                                           share_group, permission,
                                                           passwd)

    def unshare_subdir_for_group(self, repo_id, path, owner, share_group):
        return seafserv_threaded_rpc.unshare_subdir_for_group(repo_id, path,
                                                              owner, share_group)

    def update_share_subdir_perm_for_group(self, repo_id, path, owner,
                                           share_group, permission):
        return seafserv_threaded_rpc.update_share_subdir_perm_for_group(repo_id, path, owner,
                                                                        share_group, permission)

    def get_group_repoids(self, group_id):
        """
        Return the list of group repo ids
        """
        # RPC returns a '\n'-separated id string; split into a list.
        repo_ids = seafserv_threaded_rpc.get_group_repoids(group_id)
        if not repo_ids:
            return []
        l = []
        for repo_id in repo_ids.split("\n"):
            if repo_id == '':
                continue
            l.append(repo_id)
        return l

    def get_group_repo_list(self, group_id):
        # deprecated, use get_repos_by_group instead.
        ret = []
        for repo_id in self.get_group_repoids(group_id):
            r = self.get_repo(repo_id)
            if r is None:
                continue
            ret.append(r)
        return ret

    def get_repos_by_group(self, group_id):
        """
        Return: a list of Repo objects
        """
        return seafserv_threaded_rpc.get_repos_by_group(group_id)

    def get_group_repos_by_owner(self, username):
        """
        Get all repos a user share to any group
        Return: a list of Repo objects
        """
        return seafserv_threaded_rpc.get_group_repos_by_owner(username)

    def remove_group_repos_by_owner(self, group_id, username):
        """
        Unshare all repos a user shared to a group.
        """
        return seafserv_threaded_rpc.remove_repo_group(group_id, username)

    def remove_group_repos(self, group_id):
        """
        Remove all repos under group.
        Return: 0 success; -1 failed
        """
        return seafserv_threaded_rpc.remove_repo_group(group_id, None)

    def set_group_repo_permission(self, group_id, repo_id, permission):
        return seafserv_threaded_rpc.set_group_repo_permission(group_id, repo_id,
                                                               permission)

    def get_shared_users_for_subdir(self, repo_id, path, from_user):
        """
        Get all users a path is shared to.
        Return: a list of SharedUser objects.
        """
        return seafserv_threaded_rpc.get_shared_users_for_subdir(repo_id, path,
                                                                 from_user)

    def get_shared_groups_for_subdir(self, repo_id, path, from_user):
        """
        Get all groups a path is shared to.
        Return: a list of SharedGroup objects.
        """
        return seafserv_threaded_rpc.get_shared_groups_for_subdir(repo_id, path,
                                                                  from_user)

    def get_shared_users_by_repo(self, repo_id):
        users = []
        # get users that the repo is shared to
        shared_users = seafserv_threaded_rpc.get_shared_users_by_repo (repo_id)
        for user in shared_users:
            users.append(user.user)

        # get users in groups that the repo is shared to
        group_ids = seafserv_threaded_rpc.get_shared_groups_by_repo(repo_id)
        if not group_ids:
            return users
        ids = []
        for group_id in group_ids.split('\n'):
            if not group_id:
                continue
            ids.append(int(group_id))
        # group member lookup takes a JSON array of int group ids
        json_ids = json.dumps(ids)
        group_users = ccnet_threaded_rpc.get_groups_members(json_ids)
        for user in group_users:
            if user.user_name not in users:
                users.append(user.user_name)

        return users

    # organization wide repo

    def add_inner_pub_repo(self, repo_id, permission):
        return seafserv_threaded_rpc.set_inner_pub_repo(repo_id, permission)

    def remove_inner_pub_repo(self, repo_id):
        return seafserv_threaded_rpc.unset_inner_pub_repo(repo_id)

    def get_inner_pub_repo_list(self):
        """
        Return: a list of Repo objects.
        """
        return seafserv_threaded_rpc.list_inner_pub_repos()

    def list_inner_pub_repos_by_owner(self, repo_owner):
        """
        Return: a list of Repo objects.
        """
        return seafserv_threaded_rpc.list_inner_pub_repos_by_owner(repo_owner)

    def count_inner_pub_repos(self):
        return seafserv_threaded_rpc.count_inner_pub_repos()

    def is_inner_pub_repo(self, repo_id):
        return seafserv_threaded_rpc.is_inner_pub_repo(repo_id)

    # permission checks

    def check_permission(self, repo_id, user):
        """
        Check repo share permissions. Only check user share, group share and
        inner-pub shares.
        Return: 'r', 'rw', or None
        """
        return seafserv_threaded_rpc.check_permission(repo_id, user)

    def check_permission_by_path(self, repo_id, path, user):
        """
        Check both repo share permission and sub-folder access permissions.
        This function should be used when updating file/folder in a repo.
        In CE, this function is equivalent to check_permission.
        Return: 'r', 'rw', or None
        """
        return seafserv_threaded_rpc.check_permission_by_path(repo_id, path, user)

    def is_repo_syncable(self, repo_id, user, repo_perm):
        """
        Check if the permission of the repo is syncable.
        """
        # CE: every repo is syncable.
        return '{"is_syncable":true}'

    def is_dir_downloadable(self, repo_id, dir_path, user, repo_perm):
        """
        Check if the permission of the dir is downloadable.
        {"is_downloadable": false, "undownloadable_path":"path"}
        - is_downloadable: true if the dir is downloadable, false if not.
        - undownloadable_path: the undownloadable path of the repo if the
          path is not downloadable.
        """
        # CE: every dir is downloadable.
        return '{"is_downloadable":true}'

    # token

    def generate_repo_token(self, repo_id, username):
        """Generate a token for sync a repo
        """
        return seafserv_threaded_rpc.generate_repo_token(repo_id, username)

    def delete_repo_token(self, repo_id, token, user):
        return seafserv_threaded_rpc.delete_repo_token(repo_id, token, user)

    def list_repo_tokens(self, repo_id):
        """
        Return: a list of RepoTokenInfo objects.
        """
        return seafserv_threaded_rpc.list_repo_tokens(repo_id)

    def list_repo_tokens_by_email(self, username):
        return seafserv_threaded_rpc.list_repo_tokens_by_email(username)

    def delete_repo_tokens_by_peer_id(self, email, peer_id):
        return seafserv_threaded_rpc.delete_repo_tokens_by_peer_id(email, peer_id)

    def delete_repo_tokens_by_email(self, email):
        return seafserv_threaded_rpc.delete_repo_tokens_by_email(email)

    # quota

    def get_user_self_usage(self, username):
        """Get the sum of repos' size of the user"""
        return seafserv_threaded_rpc.get_user_quota_usage(username)

    def get_user_share_usage(self, username):
        # sum (repo_size * number_of_shares)
        return seafserv_threaded_rpc.get_user_share_usage(username)

    def get_user_quota(self, username):
        """
        Return: -2 if quota is unlimited; otherwise it must be number > 0.
        """
        return seafserv_threaded_rpc.get_user_quota(username)

    def set_user_quota(self, username, quota):
        return seafserv_threaded_rpc.set_user_quota(username, quota)

    def get_user_share_quota(self, username):
        return -2 # unlimited

    def set_user_share_quota(self, username, quota):
        # no-op in CE
        pass

    def check_quota(self, repo_id, delta=0):
        return seafserv_threaded_rpc.check_quota(repo_id, delta)

    def list_user_quota_usage(self):
        return seafserv_threaded_rpc.list_user_quota_usage()

    # virtual repo

    def create_virtual_repo(self, origin_repo_id, path, repo_name, repo_desc, owner, passwd=''):
        return seafserv_threaded_rpc.create_virtual_repo(origin_repo_id, path,
                                                         repo_name, repo_desc,
                                                         owner, passwd)

    def get_virtual_repos_by_owner(self, owner):
        return seafserv_threaded_rpc.get_virtual_repos_by_owner(owner)

    def get_virtual_repo(self, origin_repo, path, owner):
        return seafserv_threaded_rpc.get_virtual_repo(origin_repo, path, owner)

    # Clean trash

    def clean_up_repo_history(self, repo_id, keep_days):
        return seafserv_threaded_rpc.clean_up_repo_history(repo_id, keep_days)

    # Trashed repos

    def get_trash_repo_list(self, start, limit):
        return seafserv_threaded_rpc.get_trash_repo_list(start, limit)

    def del_repo_from_trash(self, repo_id):
        return seafserv_threaded_rpc.del_repo_from_trash(repo_id)

    def restore_repo_from_trash(self, repo_id):
        return seafserv_threaded_rpc.restore_repo_from_trash(repo_id)

    def get_trash_repos_by_owner(self, owner):
        return seafserv_threaded_rpc.get_trash_repos_by_owner(owner)

    def get_trash_repo_owner (self, repo_id):
        return seafserv_threaded_rpc.get_trash_repo_owner(repo_id)

    def empty_repo_trash(self):
        return seafserv_threaded_rpc.empty_repo_trash()

    def empty_repo_trash_by_owner(self, owner):
        return seafserv_threaded_rpc.empty_repo_trash_by_owner(owner)

    # Server config

    def get_server_config_int (self, group, key):
        return seafserv_threaded_rpc.get_server_config_int (group, key)

    def set_server_config_int (self, group, key, value):
        return seafserv_threaded_rpc.set_server_config_int (group, key, value)

    def get_server_config_int64 (self, group, key):
        return seafserv_threaded_rpc.get_server_config_int64 (group, key)

    def set_server_config_int64 (self, group, key, value):
        return seafserv_threaded_rpc.set_server_config_int64 (group, key, value)

    def get_server_config_string (self, group, key):
        return seafserv_threaded_rpc.get_server_config_string (group, key)

    def set_server_config_string (self, group, key, value):
        return seafserv_threaded_rpc.set_server_config_string (group, key, value)

    def get_server_config_boolean (self, group, key):
        return bool(seafserv_threaded_rpc.get_server_config_boolean (group, key))

    def set_server_config_boolean (self, group, key, value):
        # RPC expects an int flag
        i_value = 1 if bool(value) else 0
        return seafserv_threaded_rpc.set_server_config_boolean (group, key, i_value)

    def del_org_group_repo(self, repo_id, org_id, group_id):
        seafserv_threaded_rpc.del_org_group_repo(repo_id, org_id, group_id)

    def org_get_shared_users_by_repo(self, org_id, repo_id):
        users = []
        # get users that the repo is shared to
        shared_users = seafserv_threaded_rpc.org_get_shared_users_by_repo(org_id, repo_id)
        for user in shared_users:
            users.append(user.user)

        # get users in groups that the repo is shared to
        group_ids = seafserv_threaded_rpc.get_org_groups_by_repo(org_id, repo_id)
        if not group_ids:
            return users
        ids = []
        for group_id in group_ids.split('\n'):
            if not group_id:
                continue
            ids.append(int(group_id))
        json_ids = json.dumps(ids)
        group_users = ccnet_threaded_rpc.get_groups_members(json_ids)
        for user in group_users:
            if user.user_name not in users:
                users.append(user.user_name)

        return users

    def list_org_inner_pub_repos(self, org_id):
        return seafserv_threaded_rpc.list_org_inner_pub_repos(org_id)

    def convert_repo_path(self, repo_id, path, user, is_org=False):
        return seafserv_threaded_rpc.convert_repo_path(repo_id, path, user,
                                                       1 if is_org else 0)

    def publish_event(self, channel, content):
        return seafserv_threaded_rpc.publish_event(channel, content)

    def pop_event(self, channel):
        return seafserv_threaded_rpc.pop_event(channel)

    def search_files(self, repo_id, search_str):
        return seafserv_threaded_rpc.search_files(repo_id, search_str)

    def search_files_by_path (self, repo_id, path, search_str):
        return seafserv_threaded_rpc.search_files_by_path(repo_id, path, search_str)


seafile_api = SeafileAPI()


class CcnetAPI(object):

    def __init__(self):
        pass

    # user management

    def add_emailuser(self, email, passwd, is_staff, is_active):
        return ccnet_threaded_rpc.add_emailuser(email, passwd, is_staff, is_active)

    def remove_emailuser(self, source, email):
        """
        source can be 'DB' or 'LDAP'.
        - 'DB': remove a user created in local database
        - 'LDAP': remove a user imported from LDAP
        """
        return ccnet_threaded_rpc.remove_emailuser(source, email)

    def validate_emailuser(self, email, passwd):
        """
        Verify user's password on login. Can be used to verify DB and LDAP
        users. The function first verify password with LDAP, then local
        database.
        """
        return ccnet_threaded_rpc.validate_emailuser(email, passwd)

    def get_emailuser(self, email):
        """
        Only return local database user or imported LDAP user.
It first lookup user from local database, if not found, lookup imported LDAP user. Return: a list of EmailUser objects (ccnet/lib/ccnetobj.vala) The 'source' attribute of EmailUser object is set to 'LDAPImport' for LDAP imported user, and 'DB' for local database user. """ return ccnet_threaded_rpc.get_emailuser(email) def get_emailuser_with_import(self, email): """ The same as get_emailuser() but import the user from LDAP if it was not imported yet. """ return ccnet_threaded_rpc.get_emailuser_with_import(email) def get_emailuser_by_id(self, user_id): """ Get a user from local database with the db index id. """ return ccnet_threaded_rpc.get_emailuser_by_id(user_id) def get_emailusers(self, source, start, limit, is_active=None): """ source: - 'DB': return local db users - 'LDAPImport': return imported LDAP users - 'LDAP': retrieve users directly from LDAP server start: offset to start retrieving, -1 to start from the beginning limit: number of users to get, -1 to get all user from start is_active: True to return only active users; False to return inactive users; None to return all users. Return: a list of EmailUser objects. """ if is_active is True: status = "active" # list active users elif is_active is False: status = "inactive" # list inactive users else: status = "" # list all users return ccnet_threaded_rpc.get_emailusers(source, start, limit, status) def search_emailusers(self, source, email_patt, start, limit): """ Search for users whose name contains @email_patt. source: 'DB' for local db users; 'LDAP' for imported LDAP users. This function cannot search LDAP users directly in LDAP server. """ return ccnet_threaded_rpc.search_emailusers(source, email_patt, start, limit) def search_groups(self, group_patt, start, limit): """ Search for groups whose name contains @group_patt. 
""" return ccnet_threaded_rpc.search_groups(group_patt, start, limit) def search_group_members(self, group_id, pattern): return ccnet_threaded_rpc.search_group_members(group_id, pattern) def get_top_groups(self, including_org=False): return ccnet_threaded_rpc.get_top_groups(1 if including_org else 0) def get_child_groups(self, group_id): return ccnet_threaded_rpc.get_child_groups(group_id) def get_descendants_groups(self, group_id): return ccnet_threaded_rpc.get_descendants_groups(group_id) def get_ancestor_groups(self, group_id): return ccnet_threaded_rpc.get_ancestor_groups(group_id) def count_emailusers(self, source): """ Return the number of active users by source. source: 'DB' for local db users; 'LDAP' for imported LDAP users. """ return ccnet_threaded_rpc.count_emailusers(source) def count_inactive_emailusers(self, source): """ Return the number of inactive users by source. source: 'DB' for local db users; 'LDAP' for imported LDAP users. """ return ccnet_threaded_rpc.count_inactive_emailusers(source) def update_emailuser(self, source, user_id, password, is_staff, is_active): """ source: 'DB' for local db user; 'LDAP' for imported LDAP user. user_id: usually not changed. password: new password in plain text. Only effective for DB users. If '!' is passed, the password won't be updated. is_staff: change superuser status is_active: activate or deactivate user """ return ccnet_threaded_rpc.update_emailuser(source, user_id, password, is_staff, is_active) def update_role_emailuser(self, email, role, is_manual_set=True): return ccnet_threaded_rpc.update_role_emailuser(email, role) def get_superusers(self): """ Return: a list of EmailUser objects. 
""" return ccnet_threaded_rpc.get_superusers() def get_emailusers_in_list(self, source, user_list): """ @source: 'DB' or 'LDAP' @user_list: json '[user1, user2, user3,...]' """ return ccnet_threaded_rpc.get_emailusers_in_list(source, user_list) def update_emailuser_id (self, old_email, new_email): return ccnet_threaded_rpc.update_emailuser_id (old_email, new_email) # group management def create_group(self, group_name, user_name, gtype=None, parent_group_id=0): """ For CE, gtype is not used and should always be None. """ return ccnet_threaded_rpc.create_group(group_name, user_name, gtype, parent_group_id) def create_org_group(self, org_id, group_name, user_name, parent_group_id=0): return ccnet_threaded_rpc.create_org_group(org_id, group_name, user_name, parent_group_id) def remove_group(self, group_id): """ permission check should be done before calling this function. """ return ccnet_threaded_rpc.remove_group(group_id) def group_add_member(self, group_id, user_name, member_name): """ user_name: unused. """ return ccnet_threaded_rpc.group_add_member(group_id, user_name, member_name) def group_remove_member(self, group_id, user_name, member_name): """ user_name: unused. """ return ccnet_threaded_rpc.group_remove_member(group_id, user_name, member_name) def group_set_admin(self, group_id, member_name): """ No effect if member_name is not in the group. """ return ccnet_threaded_rpc.group_set_admin(group_id, member_name) def group_unset_admin(self, group_id, member_name): """ No effect if member_name is not in the group. """ return ccnet_threaded_rpc.group_unset_admin(group_id, member_name) def set_group_name(self, group_id, group_name): return ccnet_threaded_rpc.set_group_name(group_id, group_name) def quit_group(self, group_id, user_name): return ccnet_threaded_rpc.quit_group(group_id, user_name) def get_groups(self, user_name, return_ancestors=False): """ Get all groups the user belongs to. 
Return: a list of Group objects (ccnet/lib/ccnetobj.vala) """ return ccnet_threaded_rpc.get_groups(user_name, 1 if return_ancestors else 0) def get_all_groups(self, start, limit, source=None): """ For CE, source is not used and should alwasys be None. """ return ccnet_threaded_rpc.get_all_groups(start, limit, source) def get_group(self, group_id): return ccnet_threaded_rpc.get_group(group_id) def get_group_members(self, group_id, start=-1, limit=-1): """ Return a list of GroupUser objects (ccnet/lib/ccnetobj.vala) """ return ccnet_threaded_rpc.get_group_members(group_id, start, limit) def get_members_with_prefix (self, group_id, prefix=None): """ Return a list of GroupUser objects """ return ccnet_threaded_rpc.get_members_with_prefix(group_id, prefix) def check_group_staff(self, group_id, username, in_structure=False): """ Return non-zero value if true, 0 if not true """ return ccnet_threaded_rpc.check_group_staff(group_id, username, 1 if in_structure else 0) def remove_group_user(self, username): return ccnet_threaded_rpc.remove_group_user(username) def is_group_user(self, group_id, user, in_structure=True): """ Return non-zero value if true, 0 if not true If @in_structure is true, return whether user is in descendants groups and @group_id it self """ return ccnet_threaded_rpc.is_group_user(group_id, user, 1 if in_structure else 0) def set_group_creator(self, group_id, user_name): return ccnet_threaded_rpc.set_group_creator(group_id, user_name) # organization management def create_org(self, org_name, url_prefix, creator): return ccnet_threaded_rpc.create_org(org_name, url_prefix, creator) def remove_org(self, org_id): return ccnet_threaded_rpc.remove_org(org_id) def get_all_orgs(self, start, limit): """ Return a list of Organization objects (ccnet/lib/ccnetobj.vala) """ return ccnet_threaded_rpc.get_all_orgs(start, limit) def count_orgs(self): return ccnet_threaded_rpc.count_orgs() def get_org_by_url_prefix(self, url_prefix): """ Return an Organizaion object. 
""" return ccnet_threaded_rpc.get_org_by_url_prefix(url_prefix) def get_org_by_id(self, org_id): return ccnet_threaded_rpc.get_org_by_id(org_id) def add_org_user(self, org_id, email, is_staff): return ccnet_threaded_rpc.add_org_user(org_id, email, is_staff) def remove_org_user(self, org_id, email): return ccnet_threaded_rpc.remove_org_user(org_id, email) def get_orgs_by_user(self, email): return ccnet_threaded_rpc.get_orgs_by_user(email) def get_org_emailusers(self, url_prefix, start, limit): """ Return a list of EmailUser objects. """ return ccnet_threaded_rpc.get_org_emailusers(url_prefix, start, limit) def add_org_group(self, org_id, group_id): return ccnet_threaded_rpc.add_org_group(org_id, group_id) def remove_org_group(self, org_id, group_id): return ccnet_threaded_rpc.remove_org_group(org_id, group_id) def is_org_group(self, group_id): """ Return non-zero if True, otherwise 0. """ return ccnet_threaded_rpc.is_org_group(group_id) def get_org_id_by_group(self, group_id): return ccnet_threaded_rpc.get_org_id_by_group(group_id) def get_org_groups(self, org_id, start, limit): """ Return a list of int, each int is group id. """ return ccnet_threaded_rpc.get_org_groups(org_id, start, limit) def get_org_top_groups(self, org_id): return ccnet_threaded_rpc.get_org_top_groups(org_id) def org_user_exists(self, org_id, email): """ Return non-zero if True, otherwise 0. """ return ccnet_threaded_rpc.org_user_exists(org_id, email) def is_org_staff(self, org_id, user): """ Return non-zero if True, otherwise 0. 
""" return ccnet_threaded_rpc.is_org_staff(org_id, user) def set_org_staff(self, org_id, user): return ccnet_threaded_rpc.set_org_staff(org_id, user) def unset_org_staff(self, org_id, user): return ccnet_threaded_rpc.unset_org_staff(org_id, user) def set_org_name(self, org_id, org_name): return ccnet_threaded_rpc.set_org_name(org_id, org_name) def get_primary_id (self, email): return ccnet_threaded_rpc.get_primary_id(email) def get_groups_members(self, group_ids): """ @group_ids: json '[id1, id2, id3,...]' """ return ccnet_threaded_rpc.get_groups_members(group_ids) ccnet_api = CcnetAPI() ================================================ FILE: python/seaserv/service.py ================================================ from datetime import datetime import json import logging import os import sys import configparser from urllib.parse import urlparse import seafile import re from pysearpc import SearpcError _DEBUG = 'SEAFILE_DEBUG' in os.environ ENVIRONMENT_VARIABLES = ('SEAFILE_DATA_DIR', ) # Used to fix bug in some rpc calls, will be removed in near future. MAX_INT = 2147483647 def _load_path_from_env(key, check=True): v = os.environ.get(key, '') if not v: if check: raise ImportError( "Seaserv cannot be imported, because environment variable %s is undefined." 
% key ) return None if _DEBUG: print("Loading %s from %s" % (key, v)) return os.path.normpath(os.path.expanduser(v)) def _load_data_dir(): data_dir = _load_path_from_env('SEAFILE_DATA_DIR', check=False) if data_dir: return data_dir return _load_path_from_env('SEAFILE_CONF_DIR') SEAFILE_DATA_DIR = _load_data_dir() # SEAFILE_CENTRAL_CONF_DIR is required SEAFILE_CENTRAL_CONF_DIR = _load_path_from_env('SEAFILE_CENTRAL_CONF_DIR', check=True) SEAFILE_RPC_PIPE_PATH = _load_path_from_env ("SEAFILE_RPC_PIPE_PATH", check=False) seafile_pipe_path = os.path.join(SEAFILE_RPC_PIPE_PATH if SEAFILE_RPC_PIPE_PATH else SEAFILE_DATA_DIR, 'seafile.sock') seafserv_threaded_rpc = seafile.ServerThreadedRpcClient(seafile_pipe_path) ccnet_threaded_rpc = seafserv_threaded_rpc # load ccnet server addr and port from ccnet.conf. # 'addr:port' is used when downloading a repo config = configparser.ConfigParser() config.read(os.path.join(SEAFILE_CENTRAL_CONF_DIR, 'seafile.conf')) def get_fileserver_option(key, default): ''' "fileserver" used to be "httpserver" ''' for section in ('fileserver', 'httpserver'): if config.has_option(section, key): return config.get(section, key) return default USE_GO_FILESERVER = False if config.has_option('fileserver', 'use_go_fileserver'): USE_GO_FILESERVER = config.getboolean('fileserver', 'use_go_fileserver') if "ENABLE_GO_FILESERVER" in os.environ and os.environ["ENABLE_GO_FILESERVER"] == "true": USE_GO_FILESERVER = True MAX_UPLOAD_FILE_SIZE = None # Defaults to no limit try: max_upload_size_mb = int(get_fileserver_option('max_upload_size', 0)) if max_upload_size_mb > 0: MAX_UPLOAD_FILE_SIZE = max_upload_size_mb * 1000000 except ValueError: pass MAX_DOWNLOAD_DIR_SIZE = 100 * 1000000 # Default max size of a downloadable dir try: max_download_dir_size_mb = int(get_fileserver_option('max_download_dir_size', 0)) if max_download_dir_size_mb > 0: MAX_DOWNLOAD_DIR_SIZE = max_download_dir_size_mb * 1000000 except ValueError: pass FILE_SERVER_PORT = 
get_fileserver_option('port', '8082') FILE_SERVER_ROOT = None CALC_SHARE_USAGE = False if config.has_option('quota', 'calc_share_usage'): CALC_SHARE_USAGE = config.getboolean('quota', 'calc_share_usage') # Get an instance of a logger logger = logging.getLogger(__name__) #### Basic ccnet API #### def get_emailusers(source, start, limit, is_active=None): if is_active is True: status = "active" # list active users elif is_active is False: status = "inactive" # list inactive users else: status = "" # list all users return ccnet_threaded_rpc.get_emailusers(source, start, limit, status) def count_emailusers(): try: ret = ccnet_threaded_rpc.count_emailusers() except SearpcError: ret = -1 return 0 if ret < 0 else ret def get_emailuser_with_import(email): return ccnet_threaded_rpc.get_emailuser_with_import(email) # group def get_group(group_id): group_id_int = int(group_id) try: group = ccnet_threaded_rpc.get_group(group_id_int) except SearpcError: group = None return group def get_personal_groups(start, limit): try: groups_all = ccnet_threaded_rpc.get_all_groups(start, limit) except SearpcError: return [] return [ x for x in groups_all if not is_org_group(x.id) ] def get_personal_groups_by_user(email): try: groups_all = ccnet_threaded_rpc.get_groups(email) except SearpcError: return [] return [ x for x in groups_all if not is_org_group(x.id) ] # group user def is_group_user(group_id, user): try: ret = ccnet_threaded_rpc.is_group_user(group_id, user) except SearpcError: ret = 0 return ret def check_group_staff(group_id, username): """Check where user is group staff""" group_id = int(group_id) try: ret = ccnet_threaded_rpc.check_group_staff(group_id, username) except SearpcError as e: logger.error(e) ret = 0 return True if ret == 1 else False def remove_group_user(user): """ Remove group user relationship. 
""" return ccnet_threaded_rpc.remove_group_user(user) def get_group_members(group_id, start=-1, limit=-1): group_id_int = int(group_id) try: members = ccnet_threaded_rpc.get_group_members(group_id_int, start, limit) except SearpcError: members = [] return members # org group def is_org_group(group_id): try: ret = ccnet_threaded_rpc.is_org_group(group_id) except SearpcError: ret = -1 return True if ret == 1 else False def get_org_id_by_group(group_id): try: org_id = ccnet_threaded_rpc.get_org_id_by_group(group_id) except SearpcError: org_id = -1 return org_id def get_org_groups(org_id, start, limit): try: groups = ccnet_threaded_rpc.get_org_groups(org_id, start, limit) except SearpcError: groups = [] return groups def get_org_groups_by_user(org_id, user): """ Get user's groups in org. """ try: groups_all = ccnet_threaded_rpc.get_groups(user) except SearpcError: return [] return [ x for x in groups_all if org_id == get_org_id_by_group(x.id) ] # org def create_org(org_name, url_prefix, username): ccnet_threaded_rpc.create_org(org_name, url_prefix, username) def get_org_by_url_prefix(url_prefix): try: org = ccnet_threaded_rpc.get_org_by_url_prefix(url_prefix) except SearpcError: org = None return org def get_org_by_id(org_id): try: org = ccnet_threaded_rpc.get_org_by_id(org_id) except SearpcError: org = None return org # org user def add_org_user(org_id, email, is_staff): try: ccnet_threaded_rpc.add_org_user(org_id, email, is_staff) except SearpcError: pass def remove_org_user(org_id, email): try: ccnet_threaded_rpc.remove_org_user(org_id, email) except SearpcError: pass def org_user_exists(org_id, user): try: ret = ccnet_threaded_rpc.org_user_exists(org_id, user) except SearpcError: ret = -1 return True if ret == 1 else False def get_org_users_by_url_prefix(url_prefix, start, limit): """ List org users. 
""" try: users = ccnet_threaded_rpc.get_org_emailusers(url_prefix, start, limit) except: users = [] return users def get_orgs_by_user(user): try: orgs = ccnet_threaded_rpc.get_orgs_by_user(user) except SearpcError: orgs = [] return orgs def is_org_staff(org_id, user): """ Check whether user is staff of a org. """ try: ret = ccnet_threaded_rpc.is_org_staff(org_id, user) except SearpcError: ret = -1 return True if ret == 1 else False def get_user_current_org(user, url_prefix): orgs = get_orgs_by_user(user) for org in orgs: if org.url_prefix == url_prefix: return org return None def send_command(command): client = pool.get_client() client.send_cmd(command) ret = client.response[2] pool.return_client(client) return ret def send_message(msg_type, content): client = pool.get_client() client.send_message(msg_type, content) pool.return_client(client) def get_binding_peerids(email): """Get peer ids of a given email""" try: peer_ids = ccnet_threaded_rpc.get_binding_peerids(email) except SearpcError: return [] if not peer_ids: return [] peerid_list = [] for peer_id in peer_ids.split("\n"): if peer_id == '': continue peerid_list.append(peer_id) return peerid_list ######## seafserv API #### # repo def get_repos(): """ Return repository list. """ return seafserv_threaded_rpc.get_repo_list("", 100) def get_repo(repo_id): return seafserv_threaded_rpc.get_repo(repo_id) def edit_repo(repo_id, name, desc, user): try: ret = seafserv_threaded_rpc.edit_repo(repo_id, name, desc, user) except SearpcError as e: ret = -1 return True if ret == 0 else False def create_repo(name, desc, user, passwd): """ Return repo id if successfully created a repo, otherwise None. """ try: ret = seafserv_threaded_rpc.create_repo(name, desc, user, passwd) except SearpcError as e: logger.error(e) ret = None return ret def remove_repo(repo_id): """ Return true if successfully removed a repo, otherwise false. 
""" try: ret = seafserv_threaded_rpc.remove_repo(repo_id) except SearpcError as e: logger.error(e) ret = -1 return True if ret == 0 else False def list_personal_repos_by_owner(owner): """ List users owned repos in personal context. """ try: repos = seafserv_threaded_rpc.list_owned_repos(owner) except SearpcError: repos = [] return repos def get_repo_token_nonnull(repo_id, username): return seafserv_threaded_rpc.get_repo_token_nonnull (repo_id, username) def get_repo_owner(repo_id): """ Get owner of a repo. """ try: ret = seafserv_threaded_rpc.get_repo_owner(repo_id) except SearpcError: ret = '' return ret def is_repo_owner(user, repo_id): """ Check whether user is repo owner. """ try: ret = seafserv_threaded_rpc.is_repo_owner(user, repo_id) except SearpcError: ret = 0 return ret def server_repo_size(repo_id): try: size = seafserv_threaded_rpc.server_repo_size(repo_id) except SearpcError: size = 0 return size # org repo def create_org_repo(repo_name, repo_desc, user, passwd, org_id): """ Create org repo, return valid repo id if success. """ try: repo_id = seafserv_threaded_rpc.create_org_repo(repo_name, repo_desc, user, passwd, org_id) except SearpcError: repo_id = None return repo_id def is_org_repo(repo_id): org_id = get_org_id_by_repo_id(repo_id) return True if org_id > 0 else False def list_org_repos_by_owner(org_id, user): try: repos = seafserv_threaded_rpc.list_org_repos_by_owner(org_id, user) except SearpcError: repos = [] return repos def get_org_repos(org_id, start, limit): """ List repos created in org. """ try: repos = seafserv_threaded_rpc.get_org_repo_list(org_id, start, limit) except SearpcError: repos = [] if repos: for r in repos: r.owner = get_org_repo_owner(r.id) return repos def get_org_id_by_repo_id(repo_id): """ Get org id according repo id. """ try: org_id = seafserv_threaded_rpc.get_org_id_by_repo_id(repo_id) except SearpcError: org_id = -1 return org_id def is_org_repo_owner(org_id, repo_id, user): """ Check whether user is org repo owner. 
NOTE: `org_id` may used in future. """ owner = get_org_repo_owner(repo_id) if not owner: return False return True if owner == user else False def get_org_repo_owner(repo_id): """ Get owner of org repo. """ try: owner = seafserv_threaded_rpc.get_org_repo_owner(repo_id) except SearpcError: owner = None return owner # commit def get_commit(repo_id, repo_version, cmt_id): """ Get a commit. """ try: ret = seafserv_threaded_rpc.get_commit(repo_id, repo_version, cmt_id) except SearpcError: ret = None return ret def get_commits(repo_id, offset, limit): """Get commit lists.""" try: ret = seafserv_threaded_rpc.get_commit_list(repo_id, offset, limit) except SearpcError: ret = None return ret # branch def get_branches(repo_id): """Get branches of a given repo""" return seafserv_threaded_rpc.branch_gets(repo_id) # group repo def get_group_repos_by_owner(user): """ List user's repos that are sharing to groups """ try: ret = seafserv_threaded_rpc.get_group_repos_by_owner(user) except SearpcError: ret = [] return ret def get_shared_groups_by_repo(repo_id): try: group_ids = seafserv_threaded_rpc.get_shared_groups_by_repo(repo_id) except SearpcError: group_ids = '' if not group_ids: return [] groups = [] for group_id in group_ids.split('\n'): if not group_id: continue group = get_group(group_id) if group: groups.append(group) return groups def conv_repoids_to_list(repo_ids): """ Convert repo ids seperated by "\n" to list. 
""" if not repo_ids: return [] repoid_list = [] for repo_id in repo_ids.split("\n"): if repo_id == '': continue repoid_list.append(repo_id) return repoid_list def get_group_repoids(group_id): """Get repo ids of a given group id.""" try: repo_ids = seafserv_threaded_rpc.get_group_repoids(group_id) except SearpcError: return [] return conv_repoids_to_list(repo_ids) def get_group_repos(group_id, user): """Get repos of a given group id.""" repoid_list = get_group_repoids(group_id) repos = [] for repo_id in repoid_list: if not repo_id: continue repo = get_repo(repo_id) if not repo: continue repo.owner = seafserv_threaded_rpc.get_group_repo_owner(repo_id) repo.share_from_me = True if user == repo.owner else False last_commit = get_commits(repo.id, 0, 1)[0] repo.latest_modify = last_commit.ctime if last_commit else None repos.append(repo) repos.sort(lambda x, y: cmp(y.latest_modify, x.latest_modify)) return repos # org group repo def del_org_group_repo(repo_id, org_id, group_id): seafserv_threaded_rpc.del_org_group_repo(repo_id, org_id, group_id) def get_org_group_repoids(org_id, group_id): try: repo_ids = seafserv_threaded_rpc.get_org_group_repoids(org_id, group_id) except SearpcError: repo_ids = '' return conv_repoids_to_list(repo_ids) def get_org_group_repos(org_id, group_id, user): """Get org repos of a given group id.""" repoid_list = get_org_group_repoids(org_id, group_id) if not repoid_list: return [] repos = [] for repo_id in repoid_list: if not repo_id: continue repo = get_repo(repo_id) if not repo: continue repo.owner = seafserv_threaded_rpc.get_org_group_repo_owner(org_id, group_id, repo_id) repo.sharecd_from_me = True if user == repo.owner else False last_commit = get_commits(repo.id, 0, 1)[0] repo.latest_modify = last_commit.ctime if last_commit else None repos.append(repo) repos.sort(lambda x, y: cmp(y.latest_modify, x.latest_modify)) return repos def get_org_groups_by_repo(org_id, repo_id): try: group_ids = 
seafserv_threaded_rpc.get_org_groups_by_repo(org_id, repo_id) except SearpcError: group_ids = '' if not group_ids: return [] groups = [] for group_id in group_ids.split('\n'): if not group_id: continue group = get_group(group_id) if group: groups.append(group) return groups # inner pub repo def list_inner_pub_repos_by_owner(user): """ List a user's inner pub repos. """ try: ret = seafserv_threaded_rpc.list_inner_pub_repos_by_owner(user) except SearpcError: ret = [] return ret def list_inner_pub_repos(username): """ List inner pub repos, which can be access by everyone. """ try: shared_repos = seafserv_threaded_rpc.list_inner_pub_repos() except: shared_repos = [] for repo in shared_repos: repo.user_perm = check_permission(repo.props.repo_id, username) shared_repos.sort(lambda x, y: cmp(y.props.last_modified, x.props.last_modified)) return shared_repos def count_inner_pub_repos(): try: ret = seafserv_threaded_rpc.count_inner_pub_repos() except SearpcError: ret = -1 return 0 if ret < 0 else ret def is_inner_pub_repo(repo_id): """ Check whether a repo is public. Return 0 if repo is not inner public, otherwise non-zero. """ try: ret = seafserv_threaded_rpc.is_inner_pub_repo(repo_id) except SearpcError: ret = 0 return ret def unset_inner_pub_repo(repo_id): seafserv_threaded_rpc.unset_inner_pub_repo(repo_id) # org inner pub repo def list_org_inner_pub_repos(org_id, username, start=None, limit=None): """ List org inner pub repos, which can be access by all org members. """ try: shared_repos = seafserv_threaded_rpc.list_org_inner_pub_repos(org_id) except SearpcError: shared_repos = [] for repo in shared_repos: repo.user_perm = check_permission(repo.props.repo_id, username) # sort repos by last modify time shared_repos.sort(lambda x, y: cmp(y.props.last_modified, x.props.last_modified)) return shared_repos # repo permissoin def check_permission(repo_id, user): """ Check whether user has permission to access repo. Return values can be 'rw' or 'r' or None. 
""" try: ret = seafserv_threaded_rpc.check_permission(repo_id, user) except SearpcError: ret = None return ret def is_personal_repo(repo_id): """ Check whether repo is personal repo. """ try: owner = seafserv_threaded_rpc.get_repo_owner(repo_id) except SearpcError: owner = '' return True if owner else False # shared repo def list_share_repos(user, share_type, start, limit): try: ret = seafserv_threaded_rpc.list_share_repos(user, share_type, start, limit) except SearpcError: ret = [] return ret def remove_share(repo_id, from_user, to_user): seafserv_threaded_rpc.remove_share(repo_id, from_user, to_user) def unshare_group_repo(repo_id, group_id, from_user): return seafserv_threaded_rpc.group_unshare_repo(repo_id, int(group_id), from_user) def list_personal_shared_repos(user, user_type, start, limit): """ List personal repos that user share with others. If `user_type` is 'from_email', list repos user shares to others; If `user_type` is 'to_email', list repos others share to user. """ share_repos = list_share_repos(user, user_type, start, limit) for repo in share_repos: repo.user_perm = check_permission(repo.props.repo_id, user) share_repos.sort(lambda x, y: cmp(y.last_modified, x.last_modified)) return share_repos def list_org_shared_repos(org_id, user, user_type, start, limit): """ List org repos that user share with others. If `user_type` is 'from_email', list repos user shares to others; If `user_type` is 'to_email', list repos others sahre to user. 
""" try: share_repos = seafserv_threaded_rpc.list_org_share_repos(org_id, user, user_type, start, limit) except SearpcError: share_repos = [] for repo in share_repos: repo.user_perm = check_permission(repo.props.repo_id, user) share_repos.sort(lambda x, y: cmp(y.last_modified, x.last_modified)) return share_repos # dir def list_dir_by_path(repo_id, commit_id, path): try: ret = seafserv_threaded_rpc.list_dir_by_path(repo_id, commit_id, path) except SearpcError: ret = None return ret # file def post_empty_file(repo_id, parent_dir, file_name, user): """ Return true if successfully make a new file, otherwise false. """ try: ret = seafserv_threaded_rpc.post_empty_file(repo_id, parent_dir, file_name, user) except SearpcError as e: logger.error(e) ret = -1 return True if ret == 0 else False def del_file(repo_id, parent_dir, file_name, user): """ Return true if successfully delete a file, otherwise false. """ try: ret = seafserv_threaded_rpc.del_file(repo_id, parent_dir, file_name, user) except SearpcError as e: logger.error(e) ret = -1 return True if ret == 0 else False # misc functions def is_valid_filename(file_or_dir): """ Check whether file name or directory name is valid. 
""" try: ret = seafserv_threaded_rpc.is_valid_filename('', file_or_dir) except SearpcError: ret = 0 return ret def get_file_size(store_id, version, file_id): try: fs = seafserv_threaded_rpc.get_file_size(store_id, version, file_id) except SearpcError as e: fs = 0 return fs def get_file_id_by_path(repo_id, path): try: ret = seafserv_threaded_rpc.get_file_id_by_path(repo_id, path) except SearpcError as e: ret = '' return ret def get_related_users_by_repo(repo_id): """Give a repo id, returns a list of users of: - the repo owner - members of groups to which the repo is shared - users to which the repo is shared """ owner = seafserv_threaded_rpc.get_repo_owner(repo_id) if not owner: # Can't happen return [] users = [owner] groups = get_shared_groups_by_repo(repo_id) for group in groups: members = get_group_members(group.id) for member in members: if member.user_name not in users: users.append(member.user_name) share_repos = list_share_repos(owner, 'from_email', -1, -1) for repo in share_repos: if repo.repo_id == repo_id: if repo.user not in users: users.append(repo.user) return users def get_related_users_by_org_repo(org_id, repo_id): """Org version of get_related_users_by_repo """ owner = get_org_repo_owner(repo_id) if not owner: # Can't happen return [] users = [owner] groups = get_org_groups_by_repo(org_id, repo_id) for group in groups: members = get_group_members(group.id) for member in members: if member.user_name not in users: users.append(member.user_name) share_repos = seafserv_threaded_rpc.list_org_share_repos(org_id, \ owner, 'from_email', -1, -1) for repo in share_repos: if repo.repo_id == repo_id: if repo.user not in users: users.append(repo.user) return users # quota def check_quota(repo_id, delta=0): try: ret = seafserv_threaded_rpc.check_quota(repo_id, delta) except SearpcError as e: logger.error(e) ret = -1 return ret def get_user_quota(user): try: ret = seafserv_threaded_rpc.get_user_quota(user) except SearpcError as e: logger.error(e) ret = 0 return 
ret def get_user_quota_usage(user): try: ret = seafserv_threaded_rpc.get_user_quota_usage(user) except SearpcError as e: logger.error(e) ret = 0 return ret def get_user_share_usage(user): try: ret = seafserv_threaded_rpc.get_user_share_usage(user) except SearpcError as e: logger.error(e) ret = 0 return ret # access token def web_get_access_token(repo_id, obj_id, op, username, use_onetime=1): try: ret = seafserv_rpc.web_get_access_token(repo_id, obj_id, op, username, use_onetime) except SearpcError as e: ret = '' return ret # password management def unset_repo_passwd(repo_id, user): """ Remove user password of a encrypt repo. Arguments: - `repo_id`: encrypt repo id - `user`: username """ try: ret = seafserv_threaded_rpc.unset_passwd(repo_id, user) except SearpcError as e: ret = -1 return ret def is_passwd_set(repo_id, user): try: ret = seafserv_rpc.is_passwd_set(repo_id, user) except SearpcError as e: ret = -1 return True if ret == 1 else False # repo history limit def get_repo_history_limit(repo_id): try: ret = seafserv_threaded_rpc.get_repo_history_limit(repo_id) except SearpcError as e: ret = -1 return ret def set_repo_history_limit(repo_id, days): try: ret = seafserv_threaded_rpc.set_repo_history_limit(repo_id, days) except SearpcError as e: ret = -1 return ret ================================================ FILE: run_tests.sh ================================================ #!/bin/bash set -e SCRIPT=${BASH_SOURCE[0]} PROJECT_DIR=$(dirname "${SCRIPT}") cd $PROJECT_DIR export PYTHONPATH=$PROJECT_DIR:$PYTHONPATH ci/run.py --test-only ================================================ FILE: scripts/Makefile.am ================================================ bin_SCRIPTS = parse_seahub_db.py EXTRA_DIST = parse_seahub_db.py ================================================ FILE: scripts/parse_seahub_db.py ================================================ import json import seahub_settings db_infos = seahub_settings.DATABASES['default'] print(json.dumps(db_infos)) 
================================================ FILE: scripts/sql/mysql/ccnet.sql ================================================
-- ccnet schema (users, groups, organizations), MySQL dialect.

CREATE TABLE IF NOT EXISTS Binding ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, email VARCHAR(255), peer_id CHAR(41), UNIQUE INDEX (peer_id), INDEX (email(20)) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS EmailUser ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, email VARCHAR(255), passwd VARCHAR(256), is_staff BOOL NOT NULL, is_active BOOL NOT NULL, is_department_owner BOOL NOT NULL DEFAULT 0, ctime BIGINT, reference_id VARCHAR(255), UNIQUE INDEX (email), UNIQUE INDEX (reference_id), INDEX (is_active), INDEX (is_department_owner) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS `Group` ( `group_id` BIGINT PRIMARY KEY AUTO_INCREMENT, `group_name` VARCHAR(255), `creator_name` VARCHAR(255), `timestamp` BIGINT, `type` VARCHAR(32), `parent_group_id` INTEGER ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS GroupDNPair ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, group_id INTEGER, dn VARCHAR(255) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS GroupStructure ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, group_id INTEGER, path VARCHAR(1024), UNIQUE INDEX(group_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS `GroupUser` ( `id` BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, `group_id` BIGINT, `user_name` VARCHAR(255), `is_staff` tinyint, UNIQUE INDEX (`group_id`, `user_name`), INDEX (`user_name`) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS LDAPConfig ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, cfg_group VARCHAR(255) NOT NULL, cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS LDAPUsers ( id BIGINT PRIMARY KEY AUTO_INCREMENT, email VARCHAR(255) NOT NULL, password varchar(255) NOT NULL, is_staff BOOL NOT NULL, is_active BOOL NOT NULL, extra_attrs TEXT, reference_id VARCHAR(255), UNIQUE INDEX(email), UNIQUE INDEX (reference_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS OrgGroup ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, org_id INTEGER, group_id INTEGER, INDEX (group_id), UNIQUE INDEX(org_id, group_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS OrgUser ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, org_id INTEGER, email VARCHAR(255), is_staff BOOL NOT NULL, INDEX (email), UNIQUE INDEX(org_id, email) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS Organization ( org_id BIGINT PRIMARY KEY AUTO_INCREMENT, org_name VARCHAR(255), url_prefix VARCHAR(255), creator VARCHAR(255), ctime BIGINT, UNIQUE INDEX (url_prefix) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS UserRole ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, email VARCHAR(255), role VARCHAR(255), is_manual_set INTEGER DEFAULT 0, UNIQUE INDEX (email) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS OrgFileExtWhiteList ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, org_id INTEGER, white_list TEXT, UNIQUE INDEX (org_id) ) ENGINE=INNODB;

================================================ FILE: scripts/sql/mysql/seafile.sql ================================================
-- seafile-server core schema (repos, shares, quotas, locks), MySQL dialect.

CREATE TABLE IF NOT EXISTS Branch ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, name VARCHAR(10), repo_id CHAR(41), commit_id CHAR(41), UNIQUE INDEX(repo_id, name) ) ENGINE = INNODB;

CREATE TABLE IF NOT EXISTS FileLockTimestamp ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(40), update_time BIGINT NOT NULL, UNIQUE INDEX(repo_id) );

CREATE TABLE IF NOT EXISTS FileLocks ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(40) NOT NULL, path TEXT NOT NULL, user_name VARCHAR(255) NOT NULL, lock_time BIGINT, expire BIGINT, KEY(repo_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS FolderGroupPerm ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(36) NOT NULL, path TEXT NOT NULL, permission CHAR(15), group_id INTEGER NOT NULL, INDEX(repo_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS FolderPermTimestamp ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(36), timestamp BIGINT, UNIQUE INDEX(repo_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS FolderUserPerm ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(36) NOT NULL, path TEXT NOT NULL, permission CHAR(15), user VARCHAR(255) NOT NULL, INDEX(repo_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS GCID ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(36), gc_id CHAR(36), UNIQUE INDEX(repo_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS GarbageRepos ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(36), UNIQUE INDEX(repo_id) );

CREATE TABLE IF NOT EXISTS InnerPubRepo ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(37), permission CHAR(15), UNIQUE INDEX (repo_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS LastGCID ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(36), client_id VARCHAR(128), gc_id CHAR(36), UNIQUE INDEX(repo_id, client_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS OrgGroupRepo ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, org_id INTEGER, repo_id CHAR(37), group_id INTEGER, owner VARCHAR(255), permission CHAR(15), UNIQUE INDEX(org_id, group_id, repo_id), INDEX (repo_id), INDEX (owner) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS OrgInnerPubRepo ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, org_id INTEGER, repo_id CHAR(37), UNIQUE INDEX(org_id, repo_id), permission CHAR(15) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS OrgQuota ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, org_id INTEGER, quota BIGINT, UNIQUE INDEX(org_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS OrgRepo ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, org_id INTEGER, repo_id CHAR(37), user VARCHAR(255), UNIQUE INDEX(org_id, repo_id), UNIQUE INDEX (repo_id), INDEX (org_id, user), INDEX(user) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS OrgSharedRepo ( id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT, org_id INT, repo_id CHAR(37) , from_email VARCHAR(255), to_email VARCHAR(255), permission CHAR(15), INDEX(repo_id), INDEX (org_id, repo_id), INDEX(from_email), INDEX(to_email) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS OrgUserQuota ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, org_id INTEGER, user VARCHAR(255), quota BIGINT, UNIQUE INDEX(org_id, user) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS Repo ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(37), UNIQUE INDEX (repo_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS RepoFileCount ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(36), file_count BIGINT UNSIGNED, UNIQUE INDEX(repo_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS RepoGroup ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(37), group_id INTEGER, user_name VARCHAR(255), permission CHAR(15), UNIQUE INDEX(group_id, repo_id), INDEX (repo_id), INDEX (user_name) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS RepoHead ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(37), branch_name VARCHAR(10), UNIQUE INDEX(repo_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS RepoHistoryLimit ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(37), days INTEGER, UNIQUE INDEX(repo_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS RepoInfo ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(36), name VARCHAR(255) NOT NULL, update_time BIGINT, version INTEGER, is_encrypted INTEGER, last_modifier VARCHAR(255), status INTEGER DEFAULT 0, type VARCHAR(10), UNIQUE INDEX(repo_id), INDEX (type) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS RepoOwner ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(37), owner_id VARCHAR(255), UNIQUE INDEX (repo_id), INDEX (owner_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS RepoSize ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(37), size BIGINT UNSIGNED, head_id CHAR(41), UNIQUE INDEX (repo_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS RepoStorageId ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(40) NOT NULL, storage_id VARCHAR(255) NOT NULL, UNIQUE INDEX(repo_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS RepoSyncError ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, token CHAR(41), error_time BIGINT UNSIGNED, error_con VARCHAR(1024), UNIQUE INDEX(token) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS RepoTokenPeerInfo ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, token CHAR(41), peer_id CHAR(41), peer_ip VARCHAR(50), peer_name VARCHAR(255), sync_time BIGINT, client_ver VARCHAR(20), UNIQUE INDEX(token), INDEX(peer_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS RepoTrash ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(36), repo_name VARCHAR(255), head_id CHAR(40), owner_id VARCHAR(255), size BIGINT(20), org_id INTEGER, del_time BIGINT, UNIQUE INDEX(repo_id), INDEX(owner_id), INDEX(org_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS RepoUserToken ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(37), email VARCHAR(255), token CHAR(41), UNIQUE INDEX(repo_id, token), INDEX(token), INDEX (email) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS RepoValidSince ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(37), timestamp BIGINT, UNIQUE INDEX(repo_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS RoleQuota ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, role VARCHAR(255), quota BIGINT, UNIQUE INDEX(role) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS SeafileConf ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, cfg_group VARCHAR(255) NOT NULL, cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS SharedRepo ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(37) , from_email VARCHAR(255), to_email VARCHAR(255), permission CHAR(15), INDEX (repo_id), INDEX(from_email), INDEX(to_email) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS SystemInfo ( id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT, info_key VARCHAR(256), info_value VARCHAR(1024) );

CREATE TABLE IF NOT EXISTS UserQuota ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, user VARCHAR(255), quota BIGINT, UNIQUE INDEX(user) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS UserShareQuota ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, user VARCHAR(255), quota BIGINT, UNIQUE INDEX(user) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS VirtualRepo ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(36), origin_repo CHAR(36), path TEXT, base_commit CHAR(40), UNIQUE INDEX(repo_id), INDEX(origin_repo) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS WebAP ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(37), access_property CHAR(10), UNIQUE INDEX(repo_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS WebUploadTempFiles ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(40) NOT NULL, file_path TEXT NOT NULL, tmp_file_path TEXT NOT NULL, INDEX(repo_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS RoleUploadRateLimit ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, role VARCHAR(255), upload_limit BIGINT, UNIQUE INDEX(role) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS RoleDownloadRateLimit ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, role VARCHAR(255), download_limit BIGINT, UNIQUE INDEX(role) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS UserUploadRateLimit ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, user VARCHAR(255), upload_limit BIGINT, UNIQUE INDEX(user) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS UserDownloadRateLimit ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, user VARCHAR(255), download_limit BIGINT, UNIQUE INDEX(user) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS OrgUserDefaultQuota ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, org_id INTEGER, quota BIGINT, UNIQUE INDEX(org_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS OrgDownloadRateLimit ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, org_id INTEGER, download_limit BIGINT, UNIQUE INDEX(org_id) ) ENGINE=INNODB;

CREATE TABLE IF NOT EXISTS OrgUploadRateLimit ( id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, org_id INTEGER, upload_limit BIGINT, UNIQUE
INDEX(org_id) ) ENGINE=INNODB;

================================================ FILE: scripts/sql/sqlite/config.sql ================================================
CREATE TABLE IF NOT EXISTS Config (key TEXT PRIMARY KEY, value TEXT);

================================================ FILE: scripts/sql/sqlite/groupmgr.sql ================================================
CREATE TABLE IF NOT EXISTS `Group` (`group_id` INTEGER PRIMARY KEY AUTOINCREMENT, `group_name` VARCHAR(255), `creator_name` VARCHAR(255), `timestamp` BIGINT, `type` VARCHAR(32), `parent_group_id` INTEGER);
CREATE TABLE IF NOT EXISTS `GroupUser` (`group_id` INTEGER, `user_name` VARCHAR(255), `is_staff` tinyint);
CREATE UNIQUE INDEX IF NOT EXISTS groupid_username_indx on `GroupUser` (`group_id`, `user_name`);
CREATE INDEX IF NOT EXISTS username_indx on `GroupUser` (`user_name`);
CREATE TABLE IF NOT EXISTS GroupDNPair (group_id INTEGER, dn VARCHAR(255));
CREATE TABLE IF NOT EXISTS GroupStructure (group_id INTEGER PRIMARY KEY, path VARCHAR(1024));

================================================ FILE: scripts/sql/sqlite/org.sql ================================================
CREATE TABLE IF NOT EXISTS OrgGroup (org_id INTEGER, group_id INTEGER);
CREATE INDEX IF NOT EXISTS groupid_indx on OrgGroup (group_id);
CREATE TABLE IF NOT EXISTS Organization (org_id INTEGER PRIMARY KEY AUTOINCREMENT, org_name VARCHAR(255), url_prefix VARCHAR(255), creator VARCHAR(255), ctime BIGINT);
CREATE UNIQUE INDEX IF NOT EXISTS url_prefix_indx on Organization (url_prefix);
CREATE TABLE IF NOT EXISTS OrgUser (org_id INTEGER, email TEXT, is_staff bool NOT NULL);
CREATE INDEX IF NOT EXISTS email_indx on OrgUser (email);
CREATE UNIQUE INDEX IF NOT EXISTS orgid_email_indx on OrgUser (org_id, email);

================================================ FILE: scripts/sql/sqlite/seafile.sql ================================================
CREATE TABLE IF NOT EXISTS Branch (name VARCHAR(10), repo_id CHAR(40), commit_id CHAR(40), PRIMARY KEY (repo_id, name));
CREATE TABLE IF NOT EXISTS Repo (repo_id CHAR(37) PRIMARY KEY);
CREATE TABLE IF NOT EXISTS RepoOwner (repo_id CHAR(37) PRIMARY KEY, owner_id TEXT);
CREATE INDEX IF NOT EXISTS OwnerIndex ON RepoOwner (owner_id);
CREATE TABLE IF NOT EXISTS RepoGroup (repo_id CHAR(37), group_id INTEGER, user_name TEXT, permission CHAR(15));
CREATE UNIQUE INDEX IF NOT EXISTS groupid_repoid_indx on RepoGroup (group_id, repo_id);
CREATE INDEX IF NOT EXISTS repogroup_repoid_index on RepoGroup (repo_id);
CREATE INDEX IF NOT EXISTS repogroup_username_indx on RepoGroup (user_name);
CREATE TABLE IF NOT EXISTS InnerPubRepo (repo_id CHAR(37) PRIMARY KEY, permission CHAR(15));
CREATE TABLE IF NOT EXISTS OrgRepo (org_id INTEGER, repo_id CHAR(37), user VARCHAR(255));
CREATE UNIQUE INDEX IF NOT EXISTS repoid_indx on OrgRepo (repo_id);
CREATE INDEX IF NOT EXISTS orgid_repoid_indx on OrgRepo (org_id, repo_id);
CREATE INDEX IF NOT EXISTS orgrepo_orgid_user_indx on OrgRepo (org_id, user);
CREATE INDEX IF NOT EXISTS orgrepo_user_indx on OrgRepo (user);
CREATE TABLE IF NOT EXISTS OrgGroupRepo (org_id INTEGER, repo_id CHAR(37), group_id INTEGER, owner VARCHAR(255), permission CHAR(15));
CREATE UNIQUE INDEX IF NOT EXISTS orgid_groupid_repoid_indx on OrgGroupRepo (org_id, group_id, repo_id);
CREATE INDEX IF NOT EXISTS org_repoid_index on OrgGroupRepo (repo_id);
CREATE INDEX IF NOT EXISTS org_owner_indx on OrgGroupRepo (owner);
CREATE TABLE IF NOT EXISTS OrgInnerPubRepo (org_id INTEGER, repo_id CHAR(37), permission CHAR(15), PRIMARY KEY (org_id, repo_id));
CREATE TABLE IF NOT EXISTS RepoUserToken (repo_id CHAR(37), email VARCHAR(255), token CHAR(41));
CREATE UNIQUE INDEX IF NOT EXISTS repo_token_indx on RepoUserToken (repo_id, token);
CREATE INDEX IF NOT EXISTS repo_token_email_indx on RepoUserToken (email);
CREATE TABLE IF NOT EXISTS RepoTokenPeerInfo (token CHAR(41) PRIMARY KEY, peer_id CHAR(41), peer_ip VARCHAR(50), peer_name VARCHAR(255), sync_time BIGINT, client_ver VARCHAR(20));
CREATE TABLE IF NOT EXISTS RepoSyncError (token CHAR(41) PRIMARY KEY, error_time BIGINT, error_con VARCHAR(1024));
CREATE TABLE IF NOT EXISTS RepoHead (repo_id CHAR(37) PRIMARY KEY, branch_name VARCHAR(10));
CREATE TABLE IF NOT EXISTS RepoSize (repo_id CHAR(37) PRIMARY KEY, size BIGINT UNSIGNED, head_id CHAR(41));
CREATE TABLE IF NOT EXISTS RepoHistoryLimit (repo_id CHAR(37) PRIMARY KEY, days INTEGER);
CREATE TABLE IF NOT EXISTS RepoValidSince (repo_id CHAR(37) PRIMARY KEY, timestamp BIGINT);
CREATE TABLE IF NOT EXISTS WebAP (repo_id CHAR(37) PRIMARY KEY, access_property CHAR(10));
CREATE TABLE IF NOT EXISTS VirtualRepo (repo_id CHAR(36) PRIMARY KEY, origin_repo CHAR(36), path TEXT, base_commit CHAR(40));
CREATE INDEX IF NOT EXISTS virtualrepo_origin_repo_idx ON VirtualRepo (origin_repo);
CREATE TABLE IF NOT EXISTS GarbageRepos (repo_id CHAR(36) PRIMARY KEY);
CREATE TABLE IF NOT EXISTS RepoTrash (repo_id CHAR(36) PRIMARY KEY, repo_name VARCHAR(255), head_id CHAR(40), owner_id VARCHAR(255), size BIGINT UNSIGNED, org_id INTEGER, del_time BIGINT);
CREATE INDEX IF NOT EXISTS repotrash_owner_id_idx ON RepoTrash(owner_id);
CREATE INDEX IF NOT EXISTS repotrash_org_id_idx ON RepoTrash(org_id);
CREATE TABLE IF NOT EXISTS RepoFileCount (repo_id CHAR(36) PRIMARY KEY, file_count BIGINT UNSIGNED);
CREATE TABLE IF NOT EXISTS FolderUserPerm (repo_id CHAR(36) NOT NULL, path TEXT NOT NULL, permission CHAR(15), user VARCHAR(255) NOT NULL);
CREATE INDEX IF NOT EXISTS folder_user_perm_idx ON FolderUserPerm(repo_id);
CREATE TABLE IF NOT EXISTS FolderGroupPerm (repo_id CHAR(36) NOT NULL, path TEXT NOT NULL, permission CHAR(15), group_id INTEGER NOT NULL);
CREATE INDEX IF NOT EXISTS folder_group_perm_idx ON FolderGroupPerm(repo_id);
CREATE TABLE IF NOT EXISTS FolderPermTimestamp (repo_id CHAR(36) PRIMARY KEY, timestamp INTEGER);
CREATE TABLE IF NOT EXISTS WebUploadTempFiles (repo_id CHAR(40) NOT NULL, file_path TEXT NOT NULL, tmp_file_path TEXT NOT NULL);
CREATE TABLE IF NOT EXISTS RepoInfo (repo_id CHAR(36) PRIMARY KEY, name VARCHAR(255) NOT NULL, update_time INTEGER, version INTEGER, is_encrypted INTEGER, last_modifier VARCHAR(255), status INTEGER DEFAULT 0, type VARCHAR(10));
CREATE INDEX IF NOT EXISTS RepoInfoTypeIndex on RepoInfo (type);
CREATE TABLE IF NOT EXISTS RepoStorageId (repo_id CHAR(40) NOT NULL, storage_id VARCHAR(255) NOT NULL);
CREATE TABLE IF NOT EXISTS UserQuota (user VARCHAR(255) PRIMARY KEY, quota BIGINT);
CREATE TABLE IF NOT EXISTS UserShareQuota (user VARCHAR(255) PRIMARY KEY, quota BIGINT);
CREATE TABLE IF NOT EXISTS OrgQuota (org_id INTEGER PRIMARY KEY, quota BIGINT);
CREATE TABLE IF NOT EXISTS OrgUserQuota (org_id INTEGER, user VARCHAR(255), quota BIGINT, PRIMARY KEY (org_id, user));
CREATE TABLE IF NOT EXISTS RoleQuota (role VARCHAR(255) PRIMARY KEY, quota BIGINT);
CREATE TABLE IF NOT EXISTS SeafileConf (cfg_group VARCHAR(255) NOT NULL, cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER);
CREATE TABLE IF NOT EXISTS FileLocks (repo_id CHAR(40) NOT NULL, path TEXT NOT NULL, user_name VARCHAR(255) NOT NULL, lock_time BIGINT, expire BIGINT);
CREATE INDEX IF NOT EXISTS FileLocksIndex ON FileLocks (repo_id);
CREATE TABLE IF NOT EXISTS FileLockTimestamp (repo_id CHAR(40) PRIMARY KEY, update_time BIGINT NOT NULL);
CREATE TABLE IF NOT EXISTS SharedRepo (repo_id CHAR(37) , from_email VARCHAR(255), to_email VARCHAR(255), permission CHAR(15));
CREATE INDEX IF NOT EXISTS RepoIdIndex on SharedRepo (repo_id);
CREATE INDEX IF NOT EXISTS FromEmailIndex on SharedRepo (from_email);
CREATE INDEX IF NOT EXISTS ToEmailIndex on SharedRepo (to_email);
CREATE TABLE IF NOT EXISTS OrgSharedRepo (org_id INTEGER, repo_id CHAR(37) , from_email VARCHAR(255), to_email VARCHAR(255), permission CHAR(15));
CREATE INDEX IF NOT EXISTS OrgRepoIdIndex on OrgSharedRepo (org_id, repo_id);
CREATE INDEX IF NOT EXISTS OrgFromEmailIndex on OrgSharedRepo (from_email);
CREATE INDEX IF NOT EXISTS OrgToEmailIndex on OrgSharedRepo (to_email);
CREATE INDEX IF NOT EXISTS OrgLibIdIndex on OrgSharedRepo (repo_id);
CREATE TABLE IF NOT EXISTS SystemInfo (info_key VARCHAR(256), info_value VARCHAR(1024));

================================================ FILE: scripts/sql/sqlite/user.sql ================================================
CREATE TABLE IF NOT EXISTS Binding (email TEXT, peer_id TEXT);
CREATE UNIQUE INDEX IF NOT EXISTS peer_index on Binding (peer_id);
CREATE TABLE IF NOT EXISTS EmailUser (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, email TEXT, passwd TEXT, is_staff bool NOT NULL, is_active bool NOT NULL, ctime INTEGER, reference_id TEXT);
CREATE UNIQUE INDEX IF NOT EXISTS email_index on EmailUser (email);
CREATE UNIQUE INDEX IF NOT EXISTS reference_id_index on EmailUser (reference_id);
CREATE TABLE IF NOT EXISTS LDAPConfig (cfg_group VARCHAR(255) NOT NULL, cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER);
CREATE TABLE IF NOT EXISTS LDAPUsers (id INTEGER PRIMARY KEY AUTOINCREMENT, email TEXT NOT NULL, password TEXT NOT NULL, is_staff BOOL NOT NULL, is_active BOOL NOT NULL, extra_attrs TEXT, reference_id TEXT);
CREATE UNIQUE INDEX IF NOT EXISTS ldapusers_email_index on LDAPUsers(email);
CREATE UNIQUE INDEX IF NOT EXISTS ldapusers_reference_id_index on LDAPUsers(reference_id);
CREATE TABLE IF NOT EXISTS UserRole (email TEXT, role TEXT, is_manual_set INTEGER DEFAULT 0);
CREATE INDEX IF NOT EXISTS userrole_email_index on UserRole (email);
CREATE UNIQUE INDEX IF NOT EXISTS userrole_userrole_index on UserRole (email, role);

================================================ FILE: server/Makefile.am ================================================
SUBDIRS = gc

AM_CFLAGS = -DPKGDATADIR=\"$(pkgdatadir)\" \
	-DPACKAGE_DATA_DIR=\""$(pkgdatadir)"\" \
	-DSEAFILE_SERVER \
	-DFULL_FEATURE \
	-I$(top_srcdir)/include \
	-I$(top_srcdir)/lib \
	-I$(top_builddir)/lib \
	-I$(top_srcdir)/common \
	@SEARPC_CFLAGS@ \
	@GLIB2_CFLAGS@ \
	@MSVC_CFLAGS@ \
	@LIBARCHIVE_CFLAGS@ \
	@MYSQL_CFLAGS@ \
	@LIBHIREDIS_CFLAGS@ \
	-Wall

bin_PROGRAMS = seaf-server

noinst_HEADERS = web-accesstoken-mgr.h seafile-session.h \
	repo-mgr.h \
	share-mgr.h \
	passwd-mgr.h \
	quota-mgr.h \
	size-sched.h \
	copy-mgr.h \
	http-server.h \
	upload-file.h \
	access-file.h \
	pack-dir.h \
	fileserver-config.h \
	http-status-codes.h \
	zip-download-mgr.h \
	../common/user-mgr.h \
	../common/group-mgr.h \
	../common/org-mgr.h \
	index-blocks-mgr.h \
	http-tx-mgr.h \
	notif-mgr.h \
	change-set.h \
	metric-mgr.h

seaf_server_SOURCES = \
	seaf-server.c \
	web-accesstoken-mgr.c seafile-session.c \
	zip-download-mgr.c \
	index-blocks-mgr.c \
	share-mgr.c \
	passwd-mgr.c \
	quota-mgr.c \
	repo-op.c \
	repo-perm.c \
	size-sched.c \
	virtual-repo.c \
	copy-mgr.c \
	http-server.c \
	upload-file.c \
	access-file.c \
	pack-dir.c \
	fileserver-config.c \
	http-tx-mgr.c \
	notif-mgr.c \
	change-set.c \
	metric-mgr.c \
	../common/seaf-db.c \
	../common/branch-mgr.c ../common/fs-mgr.c \
	../common/config-mgr.c \
	repo-mgr.c ../common/commit-mgr.c \
	../common/log.c ../common/object-list.c \
	../common/rpc-service.c \
	../common/vc-common.c \
	../common/seaf-utils.c \
	../common/obj-store.c \
	../common/obj-backend-fs.c \
	../common/seafile-crypt.c \
	../common/password-hash.c \
	../common/diff-simple.c \
	../common/mq-mgr.c \
	../common/user-mgr.c \
	../common/group-mgr.c \
	../common/org-mgr.c \
	../common/block-mgr.c \
	../common/block-backend.c \
	../common/block-backend-fs.c \
	../common/merge-new.c \
	../common/obj-cache.c \
	../common/redis-cache.c \
	../common/block-tx-utils.c

seaf_server_LDADD = $(top_builddir)/lib/libseafile_common.la \
	@GLIB2_LIBS@ @GOBJECT_LIBS@ @SSL_LIBS@ @LIB_RT@ @LIB_UUID@ -lsqlite3 @LIBEVENT_LIBS@ @EVHTP_LIBS@ \
	$(top_builddir)/common/cdc/libcdc.la \
	@SEARPC_LIBS@ @JANSSON_LIBS@ ${LIB_WS32} @ZLIB_LIBS@ \
	@LIBARCHIVE_LIBS@ @LIB_ICONV@ \
	@MYSQL_LIBS@ -lsqlite3 \
	@CURL_LIBS@ @JWT_LIBS@ @LIBHIREDIS_LIBS@ @ARGON2_LIBS@

================================================ FILE: server/access-file.c ================================================
#include "common.h"
#ifdef HAVE_EVHTP #define DEBUG_FLAG SEAFILE_DEBUG_HTTP #include "log.h" #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) #include #include #include #else #include #endif #include #include #include #include #include "seafile-object.h" #include "seafile-crypt.h" #include "utils.h" #include "seafile-session.h" #include "access-file.h" #include "zip-download-mgr.h" #include "http-server.h" #include "seaf-utils.h" #define FILE_TYPE_MAP_DEFAULT_LEN 1 #define BUFFER_SIZE 1024 * 64 struct file_type_map { char *suffix; char *type; }; typedef struct SendBlockData { evhtp_request_t *req; char *block_id; BlockHandle *handle; uint32_t bsize; uint32_t remain; char store_id[37]; int repo_version; char *user; bufferevent_data_cb saved_read_cb; bufferevent_data_cb saved_write_cb; bufferevent_event_cb saved_event_cb; void *saved_cb_arg; } SendBlockData; typedef struct SendfileData { evhtp_request_t *req; Seafile *file; SeafileCrypt *crypt; gboolean enc_init; EVP_CIPHER_CTX *ctx; BlockHandle *handle; size_t remain; int idx; char store_id[37]; int repo_version; char *user; char *token_type; bufferevent_data_cb saved_read_cb; bufferevent_data_cb saved_write_cb; bufferevent_event_cb saved_event_cb; void *saved_cb_arg; } SendfileData; typedef struct SendFileRangeData { evhtp_request_t *req; Seafile *file; BlockHandle *handle; int blk_idx; guint64 start_off; guint64 range_remain; char store_id[37]; int repo_version; char *user; char *token_type; bufferevent_data_cb saved_read_cb; bufferevent_data_cb saved_write_cb; bufferevent_event_cb saved_event_cb; void *saved_cb_arg; } SendFileRangeData; typedef struct SendDirData { evhtp_request_t *req; size_t remain; guint64 total_size; int zipfd; char *zipfile; char *token; char *user; char *token_type; char repo_id[37]; bufferevent_data_cb saved_read_cb; bufferevent_data_cb saved_write_cb; bufferevent_event_cb saved_event_cb; void *saved_cb_arg; } SendDirData; extern SeafileSession *seaf; static struct file_type_map ftmap[] 
= { { "txt", "text/plain" }, { "doc", "application/vnd.ms-word" }, { "docx", "application/vnd.openxmlformats-officedocument.wordprocessingml.document" }, { "ppt", "application/vnd.ms-powerpoint" }, { "pptx", "application/vnd.openxmlformats-officedocument.presentationml.presentation" }, { "xls", "application/vnd.ms-excel" }, { "xlsx", "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" }, { "pdf", "application/pdf" }, { "zip", "application/zip"}, { "mp3", "audio/mp3" }, { "mpeg", "video/mpeg" }, { "mp4", "video/mp4" }, { "ogv", "video/ogg" }, { "mov", "video/mp4" }, { "webm", "video/webm" }, { "mkv", "video/x-matroska" }, { "jpg", "image/jpeg" }, { "JPG", "image/jpeg" }, { "jpeg", "image/jpeg" }, { "JPEG", "image/jpeg" }, { "png", "image/png" }, { "PNG", "image/png" }, { "gif", "image/gif" }, { "GIF", "image/gif" }, { "svg", "image/svg+xml" }, { "SVG", "image/svg+xml" }, { "heic", "image/heic" }, { "ico", "image/x-icon" }, { "bmp", "image/bmp" }, { "tif", "image/tiff" }, { "tiff", "image/tiff" }, { "psd", "image/vnd.adobe.photoshop" }, { "webp", "image/webp" }, { "jfif", "image/jpeg" }, { NULL, NULL }, }; static void free_sendblock_data (SendBlockData *data) { if (data->handle) { seaf_block_manager_close_block(seaf->block_mgr, data->handle); seaf_block_manager_block_handle_free(seaf->block_mgr, data->handle); } g_free (data->block_id); g_free (data->user); g_free (data); } static void free_sendfile_data (SendfileData *data) { if (data->handle) { seaf_block_manager_close_block(seaf->block_mgr, data->handle); seaf_block_manager_block_handle_free(seaf->block_mgr, data->handle); } if (data->enc_init) EVP_CIPHER_CTX_free (data->ctx); seafile_unref (data->file); g_free (data->user); g_free (data->token_type); g_free (data->crypt); g_free (data); } static void free_send_file_range_data (SendFileRangeData *data) { if (data->handle) { seaf_block_manager_close_block(seaf->block_mgr, data->handle); seaf_block_manager_block_handle_free(seaf->block_mgr, 
data->handle); } seafile_unref (data->file); g_free (data->user); g_free (data->token_type); g_free (data); } static void free_senddir_data (SendDirData *data) { close (data->zipfd); zip_download_mgr_del_zip_progress (seaf->zip_download_mgr, data->token); g_free (data->user); g_free (data->token_type); g_free (data->token); g_free (data); } static void write_block_data_cb (struct bufferevent *bev, void *ctx) { SendBlockData *data = ctx; char *blk_id; BlockHandle *handle; char buf[1024 * 64]; int n; blk_id = data->block_id; if (!data->handle) { data->handle = seaf_block_manager_open_block(seaf->block_mgr, data->store_id, data->repo_version, blk_id, BLOCK_READ); if (!data->handle) { seaf_warning ("Failed to open block %s:%s\n", data->store_id, blk_id); goto err; } data->remain = data->bsize; } handle = data->handle; n = seaf_block_manager_read_block(seaf->block_mgr, handle, buf, sizeof(buf)); data->remain -= n; if (n < 0) { seaf_warning ("Error when reading from block %s:%s.\n", data->store_id, blk_id); goto err; } else if (n == 0) { /* We've read up the data of this block, finish. */ seaf_block_manager_close_block (seaf->block_mgr, handle); seaf_block_manager_block_handle_free (seaf->block_mgr, handle); data->handle = NULL; /* Recover evhtp's callbacks */ bev->readcb = data->saved_read_cb; bev->writecb = data->saved_write_cb; bev->errorcb = data->saved_event_cb; bev->cbarg = data->saved_cb_arg; /* Resume reading incomming requests. */ evhtp_request_resume (data->req); evhtp_send_reply_end (data->req); send_statistic_msg (data->store_id, data->user, "web-file-download", (guint64)data->bsize); free_sendblock_data (data); return; } /* OK, we've got some data to send. 
*/ bufferevent_write (bev, buf, n); return; err: evhtp_connection_free (evhtp_request_get_connection (data->req)); free_sendblock_data (data); return; } static void write_data_cb (struct bufferevent *bev, void *ctx) { SendfileData *data = ctx; char *blk_id; BlockHandle *handle; char buf[1024 * 64]; int n; next: blk_id = data->file->blk_sha1s[data->idx]; if (!data->handle) { data->handle = seaf_block_manager_open_block(seaf->block_mgr, data->store_id, data->repo_version, blk_id, BLOCK_READ); if (!data->handle) { seaf_warning ("Failed to open block %s:%s\n", data->store_id, blk_id); goto err; } BlockMetadata *bmd; bmd = seaf_block_manager_stat_block_by_handle (seaf->block_mgr, data->handle); if (!bmd) goto err; data->remain = bmd->size; g_free (bmd); if (data->crypt) { if (seafile_decrypt_init (&data->ctx, data->crypt->version, (unsigned char *)data->crypt->key, (unsigned char *)data->crypt->iv) < 0) { seaf_warning ("Failed to init decrypt.\n"); goto err; } data->enc_init = TRUE; } } handle = data->handle; n = seaf_block_manager_read_block(seaf->block_mgr, handle, buf, sizeof(buf)); data->remain -= n; if (n < 0) { seaf_warning ("Error when reading from block %s.\n", blk_id); goto err; } else if (n == 0) { /* We've read up the data of this block, finish or try next block. */ seaf_block_manager_close_block (seaf->block_mgr, handle); seaf_block_manager_block_handle_free (seaf->block_mgr, handle); data->handle = NULL; if (data->crypt != NULL) { EVP_CIPHER_CTX_free (data->ctx); data->enc_init = FALSE; } if (data->idx == data->file->n_blocks - 1) { /* Recover evhtp's callbacks */ bev->readcb = data->saved_read_cb; bev->writecb = data->saved_write_cb; bev->errorcb = data->saved_event_cb; bev->cbarg = data->saved_cb_arg; /* Resume reading incomming requests. 
*/ evhtp_request_resume (data->req); evhtp_send_reply_end (data->req); char *oper = "web-file-download"; if (g_strcmp0(data->token_type, "download-link") == 0) oper = "link-file-download"; send_statistic_msg(data->store_id, data->user, oper, (guint64)data->file->file_size); free_sendfile_data (data); return; } ++(data->idx); goto next; } /* OK, we've got some data to send. */ if (data->crypt != NULL) { char *dec_out; int dec_out_len = -1; struct evbuffer *tmp_buf; dec_out = g_new (char, n + 16); if (!dec_out) { seaf_warning ("Failed to alloc memory.\n"); goto err; } int ret = EVP_DecryptUpdate (data->ctx, (unsigned char *)dec_out, &dec_out_len, (unsigned char *)buf, n); if (ret == 0) { seaf_warning ("Decrypt block %s:%s failed.\n", data->store_id, blk_id); g_free (dec_out); goto err; } tmp_buf = evbuffer_new (); evbuffer_add (tmp_buf, dec_out, dec_out_len); /* If it's the last piece of a block, call decrypt_final() * to decrypt the possible partial block. */ if (data->remain == 0) { ret = EVP_DecryptFinal_ex (data->ctx, (unsigned char *)dec_out, &dec_out_len); if (ret == 0) { seaf_warning ("Decrypt block %s:%s failed.\n", data->store_id, blk_id); evbuffer_free (tmp_buf); g_free (dec_out); goto err; } evbuffer_add (tmp_buf, dec_out, dec_out_len); } /* This may call write_data_cb() recursively (by libevent_openssl). * SendfileData struct may be free'd in the recursive calls. * So don't use "data" variable after here. 
*/
    bufferevent_write_buffer (bev, tmp_buf);
    evbuffer_free (tmp_buf);
    g_free (dec_out);
    } else {
        /* Unencrypted repo: forward the raw block data as-is. */
        bufferevent_write (bev, buf, n);
    }

    return;

err:
    evhtp_connection_free (evhtp_request_get_connection (data->req));
    free_sendfile_data (data);
    return;
}

/* bufferevent write callback used while streaming a zip archive to the
 * client.  Each invocation reads the next chunk from the zip file on disk
 * and queues it on the connection.  When the whole archive has been sent,
 * evhtp's original bufferevent callbacks are restored, the reply is
 * finished, a download-statistic message is sent and the per-request
 * state is freed. */
static void
write_dir_data_cb (struct bufferevent *bev, void *ctx)
{
    SendDirData *data = ctx;
    char buf[64 * 1024];
    int n;

    n = readn (data->zipfd, buf, sizeof(buf));
    if (n < 0) {
        seaf_warning ("Failed to read zipfile %s: %s.\n",
                      data->zipfile, strerror (errno));
        evhtp_connection_free (evhtp_request_get_connection (data->req));
        free_senddir_data (data);
    } else if (n > 0) {
        bufferevent_write (bev, buf, n);
        data->remain -= n;

        if (data->remain == 0) {
            /* Recover evhtp's callbacks */
            bev->readcb = data->saved_read_cb;
            bev->writecb = data->saved_write_cb;
            bev->errorcb = data->saved_event_cb;
            bev->cbarg = data->saved_cb_arg;

            /* Resume reading incomming requests. */
            evhtp_request_resume (data->req);

            evhtp_send_reply_end (data->req);

            /* Share-link downloads are accounted separately from plain
             * web downloads. */
            char *oper = "web-file-download";
            if (g_strcmp0(data->token_type, "download-dir-link") == 0 ||
                g_strcmp0(data->token_type, "download-multi-link") == 0)
                oper = "link-file-download";
            send_statistic_msg(data->repo_id, data->user, oper, data->total_size);

            free_senddir_data (data);
            return;
        }
    }
}

/* Error/EOF trampolines: each forwards the event to evhtp's saved event
 * callback, then frees the matching per-request aux struct. */
static void
my_block_event_cb (struct bufferevent *bev, short events, void *ctx)
{
    SendBlockData *data = ctx;

    data->saved_event_cb (bev, events, data->saved_cb_arg);

    /* Free aux data. */
    free_sendblock_data (data);
}

static void
my_event_cb (struct bufferevent *bev, short events, void *ctx)
{
    SendfileData *data = ctx;

    data->saved_event_cb (bev, events, data->saved_cb_arg);

    /* Free aux data. */
    free_sendfile_data (data);
}

static void
file_range_event_cb (struct bufferevent *bev, short events, void *ctx)
{
    SendFileRangeData *data = ctx;

    data->saved_event_cb (bev, events, data->saved_cb_arg);

    /* Free aux data. */
    free_send_file_range_data (data);
}

static void
my_dir_event_cb (struct bufferevent *bev, short events, void *ctx)
{
    SendDirData *data = ctx;

    data->saved_event_cb (bev, events, data->saved_cb_arg);

    /* Free aux data. */
    free_senddir_data (data);
}

/* Map a filename extension (case-insensitively) to a MIME type via the
 * ftmap table.  Returns a pointer to the static table entry, or NULL
 * when there is no extension or no table match.  The returned string
 * must NOT be freed by the caller. */
static char *
parse_content_type(const char *filename)
{
    char *p;
    int i;

    if ((p = strrchr(filename, '.')) == NULL)
        return NULL;
    p++;

    char *lower = g_utf8_strdown (p, strlen(p));
    for (i = 0; ftmap[i].suffix != NULL; i++) {
        if (strcmp(lower, ftmap[i].suffix) == 0) {
            g_free (lower);
            return ftmap[i].type;
        }
    }
    g_free (lower);

    return NULL;
}

/* Return TRUE when the request's User-Agent contains "firefox"
 * (case-insensitive).  Used to pick a Content-Disposition quoting
 * style Firefox accepts. */
static gboolean
test_firefox (evhtp_request_t *req)
{
    const char *user_agent = evhtp_header_find (req->headers_in, "User-Agent");
    if (!user_agent)
        return FALSE;

    GString *s = g_string_new (user_agent);
    if (g_strrstr (g_string_ascii_down (s)->str, "firefox")) {
        g_string_free (s, TRUE);
        return TRUE;
    }
    else {
        g_string_free (s, TRUE);
        return FALSE;
    }
}

/* Send the whole file identified by file_id as the HTTP response body.
 * Sets Content-Type / Content-Length / Content-Disposition headers, then
 * swaps the connection's bufferevent callbacks for write_data_cb so the
 * file's blocks are streamed (and decrypted when crypt_key is non-NULL)
 * piece by piece.  Returns 0 on success, -1 when the file object cannot
 * be loaded. */
static int
do_file(evhtp_request_t *req, SeafRepo *repo, const char *file_id,
        const char *filename, const char *operation,
        SeafileCryptKey *crypt_key, const char *user)
{
    Seafile *file;
    char *type = NULL;
    char file_size[255];
    gchar *content_type = NULL;
    char cont_filename[SEAF_PATH_MAX];
    char *key_hex, *iv_hex;
    unsigned char enc_key[32], enc_iv[16];
    SeafileCrypt *crypt = NULL;
    SendfileData *data;
    char *policy = "sandbox";

    file = seaf_fs_manager_get_seafile(seaf->fs_mgr, repo->store_id,
                                       repo->version, file_id);
    if (file == NULL)
        return -1;

    if (crypt_key != NULL) {
        g_object_get (crypt_key,
                      "key", &key_hex,
                      "iv", &iv_hex,
                      NULL);
        /* enc_version 1 uses a 16-byte key; later versions use 32 bytes. */
        if (repo->enc_version == 1)
            hex_to_rawdata (key_hex, enc_key, 16);
        else
            hex_to_rawdata (key_hex, enc_key, 32);
        hex_to_rawdata (iv_hex, enc_iv, 16);
        crypt = seafile_crypt_new (repo->enc_version, enc_key, enc_iv);
        g_free (key_hex);
        g_free (iv_hex);
    }

    evhtp_headers_add_header(req->headers_out,
                             evhtp_header_new("Access-Control-Allow-Origin",
                                              "*", 1, 1));

    type = parse_content_type(filename);
    if (type != NULL) {
        if (strstr(type,
"text")) { content_type = g_strjoin("; ", type, "charset=gbk", NULL); } else { content_type = g_strdup (type); } evhtp_headers_add_header(req->headers_out, evhtp_header_new("Content-Type", content_type, 1, 1)); g_free (content_type); if (g_strcmp0 (type, "image/svg+xml") == 0) { evhtp_headers_add_header(req->headers_out, evhtp_header_new("Content-Security-Policy", policy, 1, 1)); } } else evhtp_headers_add_header (req->headers_out, evhtp_header_new("Content-Type", "application/octet-stream", 1, 1)); snprintf(file_size, sizeof(file_size), "%"G_GINT64_FORMAT"", file->file_size); evhtp_headers_add_header (req->headers_out, evhtp_header_new("Content-Length", file_size, 1, 1)); char *esc_filename = g_uri_escape_string(filename, NULL, FALSE); if (strcmp(operation, "download") == 0 || strcmp(operation, "download-link") == 0) { /* Safari doesn't support 'utf8', 'utf-8' is compatible with most of browsers. */ snprintf(cont_filename, SEAF_PATH_MAX, "attachment;filename*=utf-8''%s;filename=\"%s\"", esc_filename, filename); } else { snprintf(cont_filename, SEAF_PATH_MAX, "inline;filename*=utf-8''%s;filename=\"%s\"", esc_filename, filename); } g_free (esc_filename); evhtp_headers_add_header(req->headers_out, evhtp_header_new("Content-Disposition", cont_filename, 1, 1)); if (g_strcmp0 (type, "image/jpg") != 0) { evhtp_headers_add_header(req->headers_out, evhtp_header_new("X-Content-Type-Options", "nosniff", 1, 1)); } /* HEAD Request */ if (evhtp_request_get_method(req) == htp_method_HEAD) { evhtp_send_reply (req, EVHTP_RES_OK); seafile_unref (file); g_free (crypt); return 0; } /* If it's an empty file, send an empty reply. 
*/ if (file->n_blocks == 0) { evhtp_send_reply (req, EVHTP_RES_OK); seafile_unref (file); g_free (crypt); return 0; } data = g_new0 (SendfileData, 1); data->req = req; data->file = file; data->crypt = crypt; data->user = g_strdup(user); data->token_type = g_strdup (operation); memcpy (data->store_id, repo->store_id, 36); data->repo_version = repo->version; /* We need to overwrite evhtp's callback functions to * write file data piece by piece. */ struct bufferevent *bev = evhtp_request_get_bev (req); data->saved_read_cb = bev->readcb; data->saved_write_cb = bev->writecb; data->saved_event_cb = bev->errorcb; data->saved_cb_arg = bev->cbarg; bufferevent_setcb (bev, NULL, write_data_cb, my_event_cb, data); /* Block any new request from this connection before finish * handling this request. */ evhtp_request_pause (req); /* Kick start data transfer by sending out http headers. */ evhtp_send_reply_start(req, EVHTP_RES_OK); return 0; } // get block handle for range start static BlockHandle * get_start_block_handle (const char *store_id, int version, Seafile *file, guint64 start, int *blk_idx) { BlockHandle *handle = NULL; BlockMetadata *bmd; char *blkid; guint64 tolsize = 0; int i = 0; for (; i < file->n_blocks; i++) { blkid = file->blk_sha1s[i]; bmd = seaf_block_manager_stat_block(seaf->block_mgr, store_id, version, blkid); if (!bmd) return NULL; if (start < tolsize + bmd->size) { g_free (bmd); break; } tolsize += bmd->size; g_free (bmd); } /* beyond the file size */ if (i == file->n_blocks) return NULL; handle = seaf_block_manager_open_block(seaf->block_mgr, store_id, version, blkid, BLOCK_READ); if (!handle) { seaf_warning ("Failed to open block %s:%s.\n", store_id, blkid); return NULL; } /* trim the offset in a block */ if (start > tolsize) { char *tmp = (char *)malloc(sizeof(*tmp) * (start - tolsize)); if (!tmp) goto err; int n = seaf_block_manager_read_block(seaf->block_mgr, handle, tmp, start-tolsize); if (n != start-tolsize) { seaf_warning ("Failed to read block 
%s:%s.\n", store_id, blkid); free (tmp); goto err; } free (tmp); } *blk_idx = i; return handle; err: seaf_block_manager_close_block(seaf->block_mgr, handle); seaf_block_manager_block_handle_free (seaf->block_mgr, handle); return NULL; } static void finish_file_range_request (struct bufferevent *bev, SendFileRangeData *data) { /* Recover evhtp's callbacks */ bev->readcb = data->saved_read_cb; bev->writecb = data->saved_write_cb; bev->errorcb = data->saved_event_cb; bev->cbarg = data->saved_cb_arg; /* Resume reading incomming requests. */ evhtp_request_resume (data->req); evhtp_send_reply_end (data->req); free_send_file_range_data (data); } static void write_file_range_cb (struct bufferevent *bev, void *ctx) { SendFileRangeData *data = ctx; char *blk_id; char buf[BUFFER_SIZE]; int bsize; int n; if (data->blk_idx == -1) { // start to send block data->handle = get_start_block_handle (data->store_id, data->repo_version, data->file, data->start_off, &data->blk_idx); if (!data->handle) goto err; } next: blk_id = data->file->blk_sha1s[data->blk_idx]; if (!data->handle) { data->handle = seaf_block_manager_open_block(seaf->block_mgr, data->store_id, data->repo_version, blk_id, BLOCK_READ); if (!data->handle) { seaf_warning ("Failed to open block %s:%s\n", data->store_id, blk_id); goto err; } } bsize = data->range_remain < BUFFER_SIZE ? 
data->range_remain : BUFFER_SIZE; n = seaf_block_manager_read_block(seaf->block_mgr, data->handle, buf, bsize); data->range_remain -= n; if (n < 0) { seaf_warning ("Error when reading from block %s:%s.\n", data->store_id, blk_id); goto err; } else if (n == 0) { seaf_block_manager_close_block (seaf->block_mgr, data->handle); seaf_block_manager_block_handle_free (seaf->block_mgr, data->handle); data->handle = NULL; ++data->blk_idx; goto next; } bufferevent_write (bev, buf, n); if (data->range_remain == 0) { if (data->start_off + n >= data->file->file_size) { char *oper = "web-file-download"; if (g_strcmp0(data->token_type, "download-link") == 0) oper = "link-file-download"; send_statistic_msg (data->store_id, data->user, oper, (guint64)data->file->file_size); } finish_file_range_request (bev, data); } return; err: evhtp_connection_free (evhtp_request_get_connection (data->req)); free_send_file_range_data (data); } // parse range offset, only support single range (-num, num-num, num-) static gboolean parse_range_val (const char *byte_ranges, guint64 *pstart, guint64 *pend, guint64 fsize) { char *ranges = strchr(byte_ranges, '='); if (!ranges) { return FALSE; } char *minus; char *end_ptr; gboolean error = FALSE; char *ranges_dup = g_strdup (ranges + 1); char *tmp = ranges_dup; guint64 start; guint64 end; minus = strchr(tmp, '-'); if (!minus) return FALSE; if (minus == tmp) { // -num mode start = strtoll(tmp, &end_ptr, 10); if (start == 0) { // range format is invalid error = TRUE; } else if (*end_ptr == '\0') { end = fsize - 1; start += fsize; } else { error = TRUE; } } else if (*(minus + 1) == '\0') { // num- mode start = strtoll(tmp, &end_ptr, 10); if (end_ptr == minus) { end = fsize - 1; } else { error = TRUE; } } else { // num-num mode start = strtoll(tmp, &end_ptr, 10); if (end_ptr == minus) { end = strtoll(minus + 1, &end_ptr, 10); if (*end_ptr != '\0') { error = TRUE; } } else { error = TRUE; } } g_free (ranges_dup); if (error) return FALSE; if (end > fsize - 1) 
{ end = fsize - 1; } if (start > end) { // Range format is valid, but range number is invalid return FALSE; } *pstart = start; *pend = end; return TRUE; } static void set_resp_disposition (evhtp_request_t *req, const char *operation, const char *filename) { char *cont_filename = NULL; char *esc_filename = g_uri_escape_string(filename, NULL, FALSE); if (strcmp(operation, "download") == 0) { cont_filename = g_strdup_printf("attachment;filename*=utf-8''%s;filename=\"%s\"", esc_filename, filename); } else { cont_filename = g_strdup_printf("inline;filename*=utf-8''%s;filename=\"%s\"", esc_filename, filename); } evhtp_headers_add_header(req->headers_out, evhtp_header_new("Content-Disposition", cont_filename, 0, 1)); g_free (esc_filename); g_free (cont_filename); } static int do_file_range (evhtp_request_t *req, SeafRepo *repo, const char *file_id, const char *filename, const char *operation, const char *byte_ranges, const char *user) { Seafile *file; SendFileRangeData *data = NULL; guint64 start; guint64 end; char *policy = "sandbox"; file = seaf_fs_manager_get_seafile(seaf->fs_mgr, repo->store_id, repo->version, file_id); if (file == NULL) return -1; /* If it's an empty file, send an empty reply. 
*/
    if (file->n_blocks == 0) {
        evhtp_send_reply (req, EVHTP_RES_OK);
        seafile_unref (file);
        return 0;
    }

    /* Unsatisfiable or malformed range: reply 416 with the total size. */
    if (!parse_range_val (byte_ranges, &start, &end, file->file_size)) {
        seafile_unref (file);
        char *con_range = g_strdup_printf ("bytes */%"G_GUINT64_FORMAT,
                                           file->file_size);
        evhtp_headers_add_header (req->headers_out,
                                  evhtp_header_new("Content-Range", con_range,
                                                   0, 1));
        g_free (con_range);
        evhtp_send_reply (req, EVHTP_RES_RANGENOTSC);
        return 0;
    }

    evhtp_headers_add_header (req->headers_out,
                              evhtp_header_new ("Accept-Ranges", "bytes", 0, 0));

    char *content_type = NULL;
    char *type = parse_content_type (filename);
    if (type != NULL) {
        if (strstr(type, "text")) {
            content_type = g_strjoin("; ", type, "charset=gbk", NULL);
        } else {
            content_type = g_strdup (type);
        }
        if (g_strcmp0 (type, "image/svg+xml") == 0) {
            /* Sandbox SVG to stop embedded scripts from running. */
            evhtp_headers_add_header(req->headers_out,
                                     evhtp_header_new("Content-Security-Policy",
                                                      policy, 1, 1));
        }
    } else {
        content_type = g_strdup ("application/octet-stream");
    }
    evhtp_headers_add_header (req->headers_out,
                              evhtp_header_new ("Content-Type", content_type,
                                                0, 1));
    g_free (content_type);

    char *con_len = g_strdup_printf ("%"G_GUINT64_FORMAT, end-start+1);
    evhtp_headers_add_header (req->headers_out,
                              evhtp_header_new("Content-Length", con_len, 0, 1));
    g_free (con_len);

    char *con_range = g_strdup_printf ("%s %"G_GUINT64_FORMAT"-%"G_GUINT64_FORMAT
                                       "/%"G_GUINT64_FORMAT, "bytes", start,
                                       end, file->file_size);
    evhtp_headers_add_header (req->headers_out,
                              evhtp_header_new ("Content-Range", con_range,
                                                0, 1));
    g_free (con_range);

    set_resp_disposition (req, operation, filename);

    if (g_strcmp0 (type, "image/jpg") != 0) {
        evhtp_headers_add_header(req->headers_out,
                                 evhtp_header_new("X-Content-Type-Options", "nosniff",
                                                  1, 1));
    }

    data = g_new0 (SendFileRangeData, 1);
    if (!data) {
        seafile_unref (file);
        return -1;
    }
    data->req = req;
    data->file = file;
    data->blk_idx = -1;   /* -1 tells write_file_range_cb to seek to start_off first. */
    data->start_off = start;
    data->range_remain = end-start+1;
    data->user = g_strdup(user);
    data->token_type =
    g_strdup (operation);

    memcpy (data->store_id, repo->store_id, 36);
    data->repo_version = repo->version;

    /* We need to overwrite evhtp's callback functions to
     * write file data piece by piece.
     */
    struct bufferevent *bev = evhtp_request_get_bev (req);
    data->saved_read_cb = bev->readcb;
    data->saved_write_cb = bev->writecb;
    data->saved_event_cb = bev->errorcb;
    data->saved_cb_arg = bev->cbarg;
    bufferevent_setcb (bev, NULL, write_file_range_cb, file_range_event_cb, data);
    /* Block any new request from this connection before finish
     * handling this request.
     */
    evhtp_request_pause (req);

    /* Kick start data transfer by sending out http headers. */
    evhtp_send_reply_start(req, EVHTP_RES_PARTIAL);

    return 0;
}

/* Begin streaming an already-built zip archive at `zipfile` to the
 * client, with Content-Type/-Length/-Disposition headers derived from
 * the archive.  The actual data is pushed by write_dir_data_cb.
 * Returns 0 on success, -1 when the archive cannot be stat'ed/opened. */
static int
start_download_zip_file (evhtp_request_t *req, const char *token,
                         const char *zipname, char *zipfile,
                         const char *repo_id, const char *user,
                         const char *token_type)
{
    SeafStat st;
    char file_size[255];
    char cont_filename[SEAF_PATH_MAX];
    int zipfd = 0;

    if (seaf_stat(zipfile, &st) < 0) {
        seaf_warning ("Failed to stat %s: %s.\n", zipfile, strerror(errno));
        return -1;
    }

    evhtp_headers_add_header(req->headers_out,
                             evhtp_header_new("Content-Type", "application/zip",
                                              1, 1));

    snprintf (file_size, sizeof(file_size), "%"G_GUINT64_FORMAT"", st.st_size);
    evhtp_headers_add_header (req->headers_out,
                              evhtp_header_new("Content-Length", file_size, 1, 1));

    char *zippath = g_strdup_printf("%s.zip", zipname);
    char *esc_zippath = g_uri_escape_string(zippath, NULL, FALSE);

    snprintf(cont_filename, SEAF_PATH_MAX,
             "attachment;filename*=utf-8''%s;filename=\"%s\"", esc_zippath, zippath);
    g_free (zippath);
    g_free (esc_zippath);

    evhtp_headers_add_header(req->headers_out,
                             evhtp_header_new("Content-Disposition", cont_filename,
                                              1, 1));

    zipfd = g_open (zipfile, O_RDONLY | O_BINARY, 0);
    if (zipfd < 0) {
        seaf_warning ("Failed to open zipfile %s: %s.\n", zipfile, strerror(errno));
        return -1;
    }

    SendDirData *data;
    data = g_new0 (SendDirData, 1);
    data->req = req;
    data->zipfd = zipfd;
    /* NOTE(review): data->zipfile takes ownership of the caller's string
     * without duplicating it — confirm free_senddir_data's contract. */
    data->zipfile =
zipfile;
    data->token = g_strdup (token);
    data->remain = st.st_size;
    data->total_size = (guint64)st.st_size;
    data->user = g_strdup (user);
    data->token_type = g_strdup (token_type);
    snprintf(data->repo_id, sizeof(data->repo_id), "%s", repo_id);

    /* We need to overwrite evhtp's callback functions to
     * write file data piece by piece.
     */
    struct bufferevent *bev = evhtp_request_get_bev (req);
    data->saved_read_cb = bev->readcb;
    data->saved_write_cb = bev->writecb;
    data->saved_event_cb = bev->errorcb;
    data->saved_cb_arg = bev->cbarg;
    bufferevent_setcb (bev, NULL, write_dir_data_cb, my_dir_event_cb, data);
    /* Block any new request from this connection before finish
     * handling this request.
     */
    evhtp_request_pause (req);

    /* Kick start data transfer by sending out http headers. */
    evhtp_send_reply_start(req, EVHTP_RES_OK);

    return 0;
}

/* Set the ETag response header to the file object id. */
static void
set_etag (evhtp_request_t *req, const char *file_id)
{
    evhtp_kv_t *kv;

    kv = evhtp_kv_new ("ETag", file_id, 1, 1);
    evhtp_kvs_add_kv (req->headers_out, kv);
}

/* Set Cache-Control to "no-cache", scoped private or public. */
static void
set_no_cache (evhtp_request_t *req, gboolean private_cache)
{
    evhtp_kv_t *kv;

    if (private_cache) {
        kv = evhtp_kv_new ("Cache-Control", "private, no-cache", 1, 1);
    } else {
        kv = evhtp_kv_new ("Cache-Control", "public, no-cache", 1, 1);
    }
    evhtp_kvs_add_kv (req->headers_out, kv);
}

/* If the client sent If-Modified-Since, answer 304 and return TRUE.
 * Otherwise attach Last-Modified (now) and Cache-Control headers so the
 * client can revalidate next time, and return FALSE. */
static gboolean
can_use_cached_content (evhtp_request_t *req)
{
    if (evhtp_kv_find (req->headers_in, "If-Modified-Since") != NULL) {
        evhtp_send_reply (req, EVHTP_RES_NOTMOD);
        return TRUE;
    }

    char http_date[256];
    evhtp_kv_t *kv;
    time_t now = time(NULL);

    /* Set Last-Modified header if the client gets this file
     * for the first time. So that the client will set
     * If-Modified-Since header the next time it gets the same
     * file.
     */
#ifndef WIN32
    strftime (http_date, sizeof(http_date), "%a, %d %b %Y %T GMT",
              gmtime(&now));
#else
    strftime (http_date, sizeof(http_date), "%a, %d %b %Y %H:%M:%S GMT",
              gmtime(&now));
#endif
    kv = evhtp_kv_new ("Last-Modified", http_date, 1, 1);
    evhtp_kvs_add_kv (req->headers_out, kv);

    kv = evhtp_kv_new ("Cache-Control", "max-age=3600", 1, 1);
    evhtp_kvs_add_kv (req->headers_out, kv);

    return FALSE;
}

/* HTTP handler for /zip/<token>: look up the access token, resolve the
 * prepared zip archive and stream it to the client.  The token's obj_id
 * field is a JSON object carrying either "dir_name" (directory download)
 * or "file_list" (multi-file download). */
static void
access_zip_cb (evhtp_request_t *req, void *arg)
{
    char *token;
    SeafileWebAccess *info = NULL;
    char *info_str = NULL;
    json_t *info_obj = NULL;
    json_error_t jerror;
    char *filename = NULL;
    char *repo_id = NULL;
    char *user = NULL;
    char *zip_file_path;
    char *token_type = NULL;
    const char *error = NULL;
    int error_code;

    char **parts = g_strsplit (req->uri->path->full + 1, "/", 0);
    if (g_strv_length (parts) != 2) {
        error = "Invalid URL\n";
        error_code = EVHTP_RES_BADREQ;
        goto out;
    }
    token = parts[1];

    info = seaf_web_at_manager_query_access_token (seaf->web_at_mgr, token);
    // Here only check token exist, follow will get zip file path, if zip file path exist
    // then the token is valid, because it pass some validations in zip stage
    if (!info) {
        error = "Access token not found\n";
        error_code = EVHTP_RES_FORBIDDEN;
        goto out;
    }

    g_object_get (info, "obj_id", &info_str, NULL);
    if (!info_str) {
        seaf_warning ("Invalid obj_id for token: %s.\n", token);
        error = "Internal server error\n";
        error_code = EVHTP_RES_SERVERR;
        goto out;
    }

    info_obj = json_loadb (info_str, strlen(info_str), 0, &jerror);
    if (!info_obj) {
        seaf_warning ("Failed to parse obj_id field: %s for token: %s.\n",
                      jerror.text, token);
        error = "Internal server error\n";
        error_code = EVHTP_RES_SERVERR;
        goto out;
    }

    if (json_object_has_member (info_obj, "dir_name")) {
        // Download dir
        filename = g_strdup (json_object_get_string_member (info_obj, "dir_name"));
    } else if (json_object_has_member (info_obj, "file_list")) {
        // Download multi: synthesize a dated archive name.
        time_t now = time(NULL);
        char date_str[11];
        strftime(date_str, sizeof(date_str),
"%Y-%m-%d", localtime(&now)); filename = g_strconcat (MULTI_DOWNLOAD_FILE_PREFIX, date_str, NULL); } else { seaf_warning ("No dir_name or file_list in obj_id for token: %s.\n", token); error = "Internal server error\n"; error_code = EVHTP_RES_SERVERR; goto out; } zip_file_path = zip_download_mgr_get_zip_file_path (seaf->zip_download_mgr, token); if (!zip_file_path) { g_object_get (info, "repo_id", &repo_id, NULL); seaf_warning ("Failed to get zip file path for %s in repo %.8s, token:[%s].\n", filename, repo_id, token); error = "Internal server error\n"; error_code = EVHTP_RES_SERVERR; goto out; } if (can_use_cached_content (req)) { // Clean zip progress related resource zip_download_mgr_del_zip_progress (seaf->zip_download_mgr, token); goto out; } g_object_get (info, "username", &user, NULL); g_object_get (info, "repo_id", &repo_id, NULL); g_object_get (info, "op", &token_type, NULL); int ret = start_download_zip_file (req, token, filename, zip_file_path, repo_id, user, token_type); if (ret < 0) { seaf_warning ("Failed to start download zip file: %s for token: %s", filename, token); error = "Internal server error\n"; error_code = EVHTP_RES_SERVERR; } out: g_strfreev (parts); if (info) g_object_unref (info); if (info_str) g_free (info_str); if (info_obj) json_decref (info_obj); if (filename) g_free (filename); if (repo_id) g_free (repo_id); if (user) g_free (user); if (token_type) g_free (token_type); if (error) { evbuffer_add_printf(req->buffer_out, "%s\n", error); evhtp_send_reply(req, error_code); } } /* static void access_zip_link_cb (evhtp_request_t *req, void *arg) { char *token; char *user = NULL; char *zip_file_path; char *zip_file_name; const char *repo_id = NULL; const char *task_id = NULL; const char *error = NULL; int error_code; SeafileShareLinkInfo *info = NULL; char **parts = g_strsplit (req->uri->path->full + 1, "/", 0); if (g_strv_length (parts) != 2) { error = "Invalid URL\n"; error_code = EVHTP_RES_BADREQ; goto out; } token = parts[1]; task_id = 
evhtp_kv_find (req->uri->query, "task_id"); if (!task_id) { error = "No task_id\n"; error_code = EVHTP_RES_BADREQ; goto out; } info = http_tx_manager_query_share_link_info (token, "dir"); if (!info) { error = "Access token not found\n"; error_code = EVHTP_RES_FORBIDDEN; goto out; } repo_id = seafile_share_link_info_get_repo_id (info); user = seaf_repo_manager_get_repo_owner (seaf->repo_mgr, repo_id); zip_file_path = zip_download_mgr_get_zip_file_path (seaf->zip_download_mgr, task_id); if (!zip_file_path) { seaf_warning ("Failed to get zip file path in repo %.8s, task id:[%s].\n", repo_id, task_id); error = "Internal server error\n"; error_code = EVHTP_RES_SERVERR; goto out; } zip_file_name = zip_download_mgr_get_zip_file_name (seaf->zip_download_mgr, task_id); if (!zip_file_name) { seaf_warning ("Failed to get zip file name in repo %.8s, task id:[%s].\n", repo_id, task_id); error = "Internal server error\n"; error_code = EVHTP_RES_SERVERR; goto out; } if (can_use_cached_content (req)) { // Clean zip progress related resource zip_download_mgr_del_zip_progress (seaf->zip_download_mgr, task_id); goto out; } int ret = start_download_zip_file (req, task_id, zip_file_name, zip_file_path, repo_id, user, "download-multi-link"); if (ret < 0) { seaf_warning ("Failed to start download zip file: %s for task: %s", zip_file_name, task_id); error = "Internal server error\n"; error_code = EVHTP_RES_SERVERR; } out: g_strfreev (parts); if (info) g_object_unref (info); if (user) g_free (user); if (error) { evbuffer_add_printf(req->buffer_out, "%s\n", error); evhtp_send_reply(req, error_code); } } */ static void access_cb(evhtp_request_t *req, void *arg) { SeafRepo *repo = NULL; char *error = NULL; char *token = NULL; char *filename = NULL; char *dec_filename = NULL; const char *repo_id = NULL; const char *data = NULL; const char *operation = NULL; const char *user = NULL; const char *byte_ranges = NULL; int error_code = EVHTP_RES_BADREQ; SeafileCryptKey *key = NULL; SeafileWebAccess 
*webaccess = NULL;

    /* Skip the first '/'. */
    char **parts = g_strsplit (req->uri->path->full + 1, "/", 0);
    if (!parts || g_strv_length (parts) < 3 ||
        strcmp (parts[0], "files") != 0) {
        error = "Invalid URL";
        goto on_error;
    }

    token = parts[1];
    filename = parts[2];

    // The filename is url-encoded.
    dec_filename = g_uri_unescape_string(filename, NULL);

    webaccess = seaf_web_at_manager_query_access_token (seaf->web_at_mgr, token);
    if (!webaccess) {
        error = "Access token not found";
        error_code = EVHTP_RES_FORBIDDEN;
        goto on_error;
    }

    repo_id = seafile_web_access_get_repo_id (webaccess);
    data = seafile_web_access_get_obj_id (webaccess);
    operation = seafile_web_access_get_op (webaccess);
    user = seafile_web_access_get_username (webaccess);

    /* The token must have been minted for a read operation. */
    if (strcmp(operation, "view") != 0 &&
        strcmp(operation, "download") != 0 &&
        strcmp(operation, "download-link") != 0) {
        error = "Operation does not match access token.";
        error_code = EVHTP_RES_FORBIDDEN;
        goto on_error;
    }

    set_etag (req, data);

    if (can_use_cached_content (req)) {
        goto success;
    }

    byte_ranges = evhtp_kv_find (req->headers_in, "Range");

    repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);
    if (!repo) {
        error = "Bad repo id\n";
        goto on_error;
    }

    if (repo->encrypted) {
        key = seaf_passwd_manager_get_decrypt_key (seaf->passwd_mgr,
                                                   repo_id, user);
        if (!key) {
            error = "Repo is encrypted. Please provide password to view it.";
            goto on_error;
        }
    }

    if (!seaf_fs_manager_object_exists (seaf->fs_mgr, repo->store_id,
                                        repo->version, data)) {
        error = "Invalid file id\n";
        goto on_error;
    }

    /* Ranged requests are only honoured for unencrypted repos. */
    if (!repo->encrypted && byte_ranges) {
        if (do_file_range (req, repo, data, dec_filename, operation,
                           byte_ranges, user) < 0) {
            error = "Internal server error\n";
            error_code = EVHTP_RES_SERVERR;
            goto on_error;
        }
    } else if (do_file(req, repo, data, dec_filename, operation,
                       key, user) < 0) {
        error = "Internal server error\n";
        error_code = EVHTP_RES_SERVERR;
        goto on_error;
    }

success:
    g_free (dec_filename);
    g_strfreev (parts);
    if (repo != NULL)
        seaf_repo_unref (repo);
    if (key != NULL)
        g_object_unref (key);
    if (webaccess)
        g_object_unref (webaccess);
    return;

on_error:
    g_free (dec_filename);
    g_strfreev (parts);
    if (repo != NULL)
        seaf_repo_unref (repo);
    if (key != NULL)
        g_object_unref (key);
    if (webaccess != NULL)
        g_object_unref (webaccess);

    evbuffer_add_printf(req->buffer_out, "%s\n", error);
    evhtp_send_reply(req, error_code);
}

/* HTTP handler for /repos/<repo-id>/files/<path>: authenticate via
 * Authorization token or Cookie (checked through seahub), resolve the
 * path to a file id, honour If-None-Match, then serve the file. */
static void
access_v2_cb(evhtp_request_t *req, void *arg)
{
    SeafRepo *repo = NULL;
    char *error_str = NULL;
    char *err_msg = NULL;
    char *token = NULL;
    char *user = NULL;
    char *dec_path = NULL;
    char *rpath = NULL;
    char *filename = NULL;
    char *file_id = NULL;
    char *ip_addr = NULL;
    const char *repo_id = NULL;
    const char *path = NULL;
    const char *operation = NULL;
    const char *byte_ranges = NULL;
    const char *auth_token = NULL;
    const char *cookie = NULL;
    const char *user_agent = NULL;
    int error_code = EVHTP_RES_BADREQ;
    SeafileCryptKey *key = NULL;
    GError *error = NULL;

    /* Skip the first '/'.
*/
    char **parts = g_strsplit (req->uri->path->full + 1, "/", 4);
    if (!parts || g_strv_length (parts) < 4 ||
        strcmp (parts[2], "files") != 0) {
        error_str = "Invalid URL\n";
        goto out;
    }

    repo_id = parts[1];
    path = parts[3];
    if (!path) {
        error_str = "No file path\n";
        goto out;
    }
    dec_path = g_uri_unescape_string(path, NULL);
    rpath = format_dir_path (dec_path);
    filename = g_path_get_basename (rpath);

    operation = evhtp_kv_find (req->uri->query, "op");
    if (!operation) {
        error_str = "No operation\n";
        goto out;
    }
    if (strcmp(operation, "view") != 0 &&
        strcmp(operation, "download") != 0) {
        error_str = "Operation is neither view or download\n";
        goto out;
    }

    auth_token = evhtp_kv_find (req->headers_in, "Authorization");
    token = seaf_parse_auth_token (auth_token);
    cookie = evhtp_kv_find (req->headers_in, "Cookie");
    ip_addr = get_client_ip_addr (req);
    user_agent = evhtp_header_find (req->headers_in, "User-Agent");
    if (!token && !cookie) {
        error_str = "Both token and cookie are not set\n";
        goto out;
    }

    /* Delegate permission checking to seahub; it also resolves the
     * acting user name into `user`. */
    int status = HTTP_OK;
    if (http_tx_manager_check_file_access (repo_id, token, cookie, dec_path,
                                           "download", ip_addr, user_agent,
                                           &user, &status, &err_msg) < 0) {
        if (status != HTTP_OK) {
            error_str = err_msg;
            error_code = status;
        } else {
            error_str = "Internal server error\n";
            error_code = EVHTP_RES_SERVERR;
        }
        goto out;
    }

    repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);
    if (!repo) {
        error_str = "Bad repo id\n";
        goto out;
    }

    file_id = seaf_fs_manager_get_seafile_id_by_path (seaf->fs_mgr,
                                                      repo->store_id,
                                                      repo->version,
                                                      repo->root_id,
                                                      rpath, &error);
    if (!file_id) {
        error_str = "Invalid file_path\n";
        if (error)
            g_clear_error(&error);
        goto out;
    }

    /* ETag match: nothing changed since the client's cached copy. */
    const char *etag = evhtp_kv_find (req->headers_in, "If-None-Match");
    if (g_strcmp0 (etag, file_id) == 0) {
        evhtp_send_reply (req, EVHTP_RES_NOTMOD);
        error_code = EVHTP_RES_OK;
        goto out;
    }

    set_etag (req, file_id);
    set_no_cache (req, TRUE);

    byte_ranges = evhtp_kv_find (req->headers_in, "Range");

    if (repo->encrypted) {
        key =
    seaf_passwd_manager_get_decrypt_key (seaf->passwd_mgr,
                                                   repo_id, user);
        if (!key) {
            error_str = "Repo is encrypted. Please provide password to view it.";
            goto out;
        }
    }

    if (!seaf_fs_manager_object_exists (seaf->fs_mgr, repo->store_id,
                                        repo->version, file_id)) {
        error_str = "Invalid file id\n";
        goto out;
    }

    /* Ranged requests are only honoured for unencrypted repos. */
    if (!repo->encrypted && byte_ranges) {
        if (do_file_range (req, repo, file_id, filename, operation,
                           byte_ranges, user) < 0) {
            error_str = "Internal server error\n";
            error_code = EVHTP_RES_SERVERR;
            goto out;
        }
    } else if (do_file(req, repo, file_id, filename, operation,
                       key, user) < 0) {
        error_str = "Internal server error\n";
        error_code = EVHTP_RES_SERVERR;
        goto out;
    }

    error_code = EVHTP_RES_OK;

out:
    g_strfreev (parts);
    g_free (token);
    g_free (user);
    g_free (dec_path);
    g_free (rpath);
    g_free (filename);
    g_free (file_id);
    g_free (ip_addr);
    if (repo != NULL)
        seaf_repo_unref (repo);
    if (key != NULL)
        g_object_unref (key);
    if (error_code != EVHTP_RES_OK) {
        evbuffer_add_printf(req->buffer_out, "%s\n", error_str);
        evhtp_send_reply(req, error_code);
    }
    g_free (err_msg);
}

/* Serve a single block of a file.  The block id must appear in the
 * file's block list (compared by its 40-char SHA1 hex).  Streaming is
 * done by write_block_data_cb.  Returns 0 when a reply has been issued
 * (including "block not found"), -1 when the file object cannot be
 * loaded. */
static int
do_block(evhtp_request_t *req, SeafRepo *repo, const char *user,
         const char *file_id, const char *blk_id)
{
    Seafile *file;
    uint32_t bsize;
    gboolean found = FALSE;
    int i;
    char blk_size[255];
    char cont_filename[SEAF_PATH_MAX];
    SendBlockData *data;

    file = seaf_fs_manager_get_seafile(seaf->fs_mgr, repo->store_id,
                                       repo->version, file_id);
    if (file == NULL)
        return -1;

    for (i = 0; i < file->n_blocks; i++) {
        if (memcmp(file->blk_sha1s[i], blk_id, 40) == 0) {
            BlockMetadata *bm = seaf_block_manager_stat_block (seaf->block_mgr,
                                                               repo->store_id,
                                                               repo->version,
                                                               blk_id);
            if (bm && bm->size >= 0) {
                bsize = bm->size;
                found = TRUE;
            }
            g_free (bm);
            break;
        }
    }

    seafile_unref (file);

    /* block not found.
*/
    if (!found) {
        evhtp_send_reply (req, EVHTP_RES_BADREQ);
        return 0;
    }

    evhtp_headers_add_header(req->headers_out,
                             evhtp_header_new("Access-Control-Allow-Origin",
                                              "*", 1, 1));

    /* Firefox needs a different quoting style for the encoded name. */
    if (test_firefox (req)) {
        snprintf(cont_filename, SEAF_PATH_MAX,
                 "attachment;filename*=\"utf-8\' \'%s\"", blk_id);
    } else {
        snprintf(cont_filename, SEAF_PATH_MAX,
                 "attachment;filename=\"%s\"", blk_id);
    }
    evhtp_headers_add_header(req->headers_out,
                             evhtp_header_new("Content-Disposition", cont_filename,
                                              1, 1));

    snprintf(blk_size, sizeof(blk_size), "%"G_GUINT32_FORMAT"", bsize);
    evhtp_headers_add_header (req->headers_out,
                              evhtp_header_new("Content-Length", blk_size, 1, 1));

    data = g_new0 (SendBlockData, 1);
    data->req = req;
    data->block_id = g_strdup(blk_id);
    data->user = g_strdup(user);

    memcpy (data->store_id, repo->store_id, 36);
    data->repo_version = repo->version;

    /* We need to overwrite evhtp's callback functions to
     * write file data piece by piece.
     */
    struct bufferevent *bev = evhtp_request_get_bev (req);
    data->saved_read_cb = bev->readcb;
    data->saved_write_cb = bev->writecb;
    data->saved_event_cb = bev->errorcb;
    data->saved_cb_arg = bev->cbarg;
    data->bsize = bsize;
    bufferevent_setcb (bev, NULL, write_block_data_cb, my_block_event_cb, data);
    /* Block any new request from this connection before finish
     * handling this request.
     */
    evhtp_request_pause (req);

    /* Kick start data transfer by sending out http headers. */
    evhtp_send_reply_start(req, EVHTP_RES_OK);

    return 0;
}

/* HTTP handler for /blks/<token>/<block-id>: validate the web-access
 * token and stream the requested block. */
static void
access_blks_cb(evhtp_request_t *req, void *arg)
{
    SeafRepo *repo = NULL;
    char *error = NULL;
    char *token = NULL;
    char *blkid = NULL;
    const char *repo_id = NULL;
    const char *id = NULL;
    const char *operation = NULL;
    const char *user = NULL;
    int error_code = EVHTP_RES_BADREQ;
    char *repo_role = NULL;
    SeafileWebAccess *webaccess = NULL;

    /* Skip the first '/'. */
    char **parts = g_strsplit (req->uri->path->full + 1, "/", 0);
    if (!parts || g_strv_length (parts) < 3 ||
        strcmp (parts[0], "blks") != 0) {
        error = "Invalid URL";
        goto on_error;
    }

    token = parts[1];
    blkid = parts[2];

    webaccess = seaf_web_at_manager_query_access_token (seaf->web_at_mgr, token);
    if (!webaccess) {
        error = "Access token not found";
        error_code = EVHTP_RES_FORBIDDEN;
        goto on_error;
    }

    if (can_use_cached_content (req)) {
        goto success;
    }

    repo_id = seafile_web_access_get_repo_id (webaccess);
    id = seafile_web_access_get_obj_id (webaccess);
    operation = seafile_web_access_get_op (webaccess);
    user = seafile_web_access_get_username (webaccess);

    repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);
    if (!repo) {
        error = "Bad repo id\n";
        goto on_error;
    }

    if (!seaf_fs_manager_object_exists (seaf->fs_mgr, repo->store_id,
                                        repo->version, id)) {
        error = "Invalid file id\n";
        goto on_error;
    }

    if (strcmp(operation, "downloadblks") == 0) {
        if (do_block(req, repo, user, id, blkid) < 0) {
            seaf_warning ("Failed to download blocks for token: %s\n", token);
            error_code = EVHTP_RES_SERVERR;
            goto on_error;
        }
    }

success:
    g_strfreev (parts);
    if (repo != NULL)
        seaf_repo_unref (repo);
    g_free (repo_role);
    g_object_unref (webaccess);
    return;

on_error:
    g_strfreev (parts);
    if (repo != NULL)
        seaf_repo_unref (repo);
    g_free (repo_role);
    if (webaccess != NULL)
        g_object_unref (webaccess);

    evbuffer_add_printf(req->buffer_out, "%s\n", error);
    evhtp_send_reply(req, error_code);
}

/* HTTP handler for /f/<token>: resolve a file share link (validated by
 * seahub through the configured private key), then serve the linked
 * file as the repo owner. */
static void
access_link_cb(evhtp_request_t *req, void *arg)
{
    SeafRepo *repo = NULL;
    char *error_str = NULL;
    char *token = NULL;
    char *rpath = NULL;
    char *filename = NULL;
    char *file_id = NULL;
    char *user = NULL;
    char *norm_file_path = NULL;
    const char *repo_id = NULL;
    const char *file_path = NULL;
    const char *share_type = NULL;
    const char *byte_ranges = NULL;
    const char *operation = NULL;
    int error_code = EVHTP_RES_BADREQ;
    SeafileCryptKey *key = NULL;
    SeafileShareLinkInfo *info = NULL;
    GError *error = NULL;

    if
(!seaf->seahub_pk) {
        /* Without the seahub private key we cannot verify share links. */
        seaf_warning ("No seahub private key is configured.\n");
        evhtp_send_reply(req, EVHTP_RES_NOTFOUND);
        return;
    }

    /* Skip the first '/'. */
    char **parts = g_strsplit (req->uri->path->full + 1, "/", 0);
    if (!parts || g_strv_length (parts) < 2 || strcmp (parts[0], "f") != 0) {
        error_str = "Invalid URL\n";
        goto out;
    }

    token = parts[1];

    /* Anything other than an explicit "view" is treated as a download. */
    operation = evhtp_kv_find (req->uri->query, "op");
    if (g_strcmp0 (operation, "view") != 0) {
        operation = "download-link";
    }

    char *ip_addr = get_client_ip_addr (req);
    const char *user_agent = evhtp_header_find (req->headers_in, "User-Agent");
    const char *cookie = evhtp_kv_find (req->headers_in, "Cookie");

    int status = HTTP_OK;
    char *err_msg = NULL;
    info = http_tx_manager_query_share_link_info (token, cookie, "file",
                                                  ip_addr, user_agent,
                                                  &status, &err_msg);
    if (!info) {
        /* Early return path: free everything allocated so far and
         * propagate seahub's status when it gave one. */
        g_strfreev (parts);
        if (status != HTTP_OK) {
            evbuffer_add_printf(req->buffer_out, "%s\n", err_msg);
            evhtp_send_reply(req, status);
        } else {
            error_str = "Internal server error\n";
            error_code = EVHTP_RES_SERVERR;
            evbuffer_add_printf(req->buffer_out, "%s\n", error_str);
            evhtp_send_reply(req, error_code);
        }
        g_free (ip_addr);
        g_free (err_msg);
        return;
    }
    g_free (ip_addr);

    repo_id = seafile_share_link_info_get_repo_id (info);
    file_path = seafile_share_link_info_get_file_path (info);
    if (!file_path) {
        error_str = "Internal server error\n";
        error_code = EVHTP_RES_SERVERR;
        seaf_warning ("Failed to get file_path by token %s\n", token);
        goto out;
    }
    /* A "/f/" link must be a file ("f") share, not a dir share. */
    share_type = seafile_share_link_info_get_share_type (info);
    if (g_strcmp0 (share_type, "f") != 0) {
        error_str = "Link type mismatch";
        goto out;
    }

    norm_file_path = normalize_utf8_path(file_path);
    rpath = format_dir_path (norm_file_path);
    filename = g_path_get_basename (rpath);

    repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);
    if (!repo) {
        error_str = "Bad repo id\n";
        goto out;
    }

    user = seaf_repo_manager_get_repo_owner (seaf->repo_mgr, repo_id);

    file_id = seaf_fs_manager_get_seafile_id_by_path (seaf->fs_mgr,
                                                      repo->store_id,
                                                      repo->version,
                                                      repo->root_id,
                                                      rpath, &error);
    if (!file_id) {
        error_str = "Invalid file_path\n";
        if (error)
            g_clear_error(&error);
        goto out;
    }

    /* ETag is the file object id: reply 304 when the client already
     * has this exact version. */
    const char *etag = evhtp_kv_find (req->headers_in, "If-None-Match");
    if (g_strcmp0 (etag, file_id) == 0) {
        evhtp_send_reply (req, EVHTP_RES_NOTMOD);
        error_code = EVHTP_RES_OK;
        goto out;
    }
    set_etag (req, file_id);
    set_no_cache (req, FALSE);

    byte_ranges = evhtp_kv_find (req->headers_in, "Range");

    if (repo->encrypted) {
        key = seaf_passwd_manager_get_decrypt_key (seaf->passwd_mgr,
                                                   repo_id, user);
        if (!key) {
            error_str = "Repo is encrypted. Please provide password to view it.";
            goto out;
        }
    }

    if (!seaf_fs_manager_object_exists (seaf->fs_mgr,
                                        repo->store_id, repo->version,
                                        file_id)) {
        error_str = "Invalid file id\n";
        goto out;
    }

    /* Range requests are only honored for unencrypted repos. */
    if (!repo->encrypted && byte_ranges) {
        if (do_file_range (req, repo, file_id, filename, operation,
                           byte_ranges, user) < 0) {
            error_str = "Internal server error\n";
            error_code = EVHTP_RES_SERVERR;
            goto out;
        }
    } else if (do_file(req, repo, file_id, filename, operation,
                       key, user) < 0) {
        error_str = "Internal server error\n";
        error_code = EVHTP_RES_SERVERR;
        goto out;
    }

    error_code = EVHTP_RES_OK;

out:
    g_strfreev (parts);
    g_free (user);
    g_free (norm_file_path);
    g_free (rpath);
    g_free (filename);
    g_free (file_id);
    if (repo != NULL)
        seaf_repo_unref (repo);
    if (key != NULL)
        g_object_unref (key);
    if (info != NULL)
        g_object_unref (info);
    if (error_code != EVHTP_RES_OK) {
        evbuffer_add_printf(req->buffer_out, "%s\n", error_str);
        evhtp_send_reply(req, error_code);
    }
}

/* The directory share-link handlers below are disabled (commented out). */
/*
static GList *
json_to_dirent_list (SeafRepo *repo, const char *parent_dir,
                     const char *dirents)
{
    json_t *array;
    json_error_t jerror;
    int i;
    int len;
    const char *tmp_file_name;
    char *file_name = NULL;
    GList *dirent_list = NULL, *p = NULL;
    SeafDir *dir;
    SeafDirent *dirent;
    GError *error = NULL;

    array = json_loadb (dirents, strlen(dirents), 0, &jerror);
    if (!array) {
        seaf_warning ("Failed to parse download data: %s.\n", jerror.text);
return NULL; } len = json_array_size (array); if (len == 0) { seaf_warning ("Invalid download data, miss download file name.\n"); json_decref (array); return NULL; } dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr, repo->store_id, repo->version, repo->root_id, parent_dir, &error); if (!dir) { if (error) { seaf_warning ("Failed to get dir %s repo %.8s: %s.\n", parent_dir, repo->store_id, error->message); g_clear_error(&error); } else { seaf_warning ("dir %s doesn't exist in repo %.8s.\n", parent_dir, repo->store_id); } json_decref (array); return NULL; } GHashTable *dirent_hash = g_hash_table_new(g_str_hash, g_str_equal); for (p = dir->entries; p; p = p->next) { SeafDirent *d = p->data; g_hash_table_insert(dirent_hash, d->name, d); } for (i = 0; i < len; i++) { tmp_file_name = json_string_value (json_array_get (array, i)); file_name = normalize_utf8_path(tmp_file_name); if (strcmp (file_name, "") == 0 || strchr (file_name, '/') != NULL) { seaf_warning ("Invalid download file name: %s.\n", file_name); if (dirent_list) { g_list_free_full (dirent_list, (GDestroyNotify)seaf_dirent_free); dirent_list = NULL; } g_free (file_name); break; } dirent = g_hash_table_lookup (dirent_hash, file_name); if (!dirent) { seaf_warning ("Failed to get dirent for %s in dir %s in repo %.8s.\n", file_name, parent_dir, repo->store_id); if (dirent_list) { g_list_free_full (dirent_list, (GDestroyNotify)seaf_dirent_free); dirent_list = NULL; } g_free (file_name); break; } dirent_list = g_list_prepend (dirent_list, seaf_dirent_dup(dirent)); g_free (file_name); } g_hash_table_unref(dirent_hash); json_decref (array); seaf_dir_free (dir); return dirent_list; } // application/x-www-form-urlencoded // parent_dir=/sub&dirents=[a.md, suba] static char * get_form_field (const char *body_str, const char *field_name) { char * value = NULL; char * result = NULL; char * start = strstr(body_str, field_name); // find pos of start if (start) { // skip field and '=' start += strlen(field_name) + 1; // 
find pos of '&' char * end = strchr(start, '&'); if (end == NULL) { end = start + strlen(start); } value = g_strndup(start, end - start); } if (!value) { return NULL; } result = g_uri_unescape_string (value, NULL); g_free (value); return result; } */ /* static void access_dir_link_cb(evhtp_request_t *req, void *arg) { SeafRepo *repo = NULL; char *error_str = NULL; char *token = NULL; char *r_parent_dir = NULL; char *fullpath = NULL; char *file_id = NULL; char *filename = NULL; char *norm_parent_dir = NULL; char *norm_path = NULL; char *user = NULL; char *tmp_parent_dir = NULL; char *dirents = NULL; const char *repo_id = NULL; const char *parent_dir = NULL; const char *path= NULL; const char *byte_ranges = NULL; int error_code = EVHTP_RES_BADREQ; SeafileCryptKey *key = NULL; SeafileShareLinkInfo *info = NULL; GError *error = NULL; if (!seaf->seahub_pk) { seaf_warning ("No seahub private key is configured.\n"); evhtp_send_reply(req, EVHTP_RES_NOTFOUND); return; } // Skip the first '/'. char **parts = g_strsplit (req->uri->path->full + 1, "/", 0); if (!parts || g_strv_length (parts) < 2 || strcmp (parts[0], "d") != 0) { error_str = "Invalid URL\n"; goto on_error; } token = parts[1]; if (g_strv_length (parts) >= 4) { if (strcmp (parts[2], "zip-task") != 0) { error_str = "Invalid URL\n"; goto on_error; } char *task_id = parts[3]; char *progress = zip_download_mgr_query_zip_progress (seaf->zip_download_mgr, task_id, NULL); if (!progress) { error_str = "No zip progress\n"; goto on_error; } evbuffer_add_printf (req->buffer_out, "%s", progress); evhtp_headers_add_header ( req->headers_out, evhtp_header_new("Content-Type", "application/json; charset=utf-8", 1, 1)); evhtp_send_reply (req, EVHTP_RES_OK); g_free (progress); goto success; } info = http_tx_manager_query_share_link_info (token, "dir"); if (!info) { error_str = "Link token not found\n"; error_code = EVHTP_RES_FORBIDDEN; goto on_error; } repo_id = seafile_share_link_info_get_repo_id (info); repo = 
seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id); if (!repo) { error_str = "Bad repo id\n"; goto on_error; } user = seaf_repo_manager_get_repo_owner (seaf->repo_mgr, repo_id); path = evhtp_kv_find (req->uri->query, "p"); if (!path) { int len = evbuffer_get_length (req->buffer_in); if (len <= 0) { error_str = "Invalid request body\n"; goto on_error; } char *body = g_new0 (char, len); evbuffer_remove(req->buffer_in, body, len); tmp_parent_dir = get_form_field (body, "parent_dir"); if (!tmp_parent_dir) { g_free (body); error_str = "Invalid parent_dir\n"; goto on_error; } dirents = get_form_field (body, "dirents"); if (!dirents) { g_free (body); g_free (tmp_parent_dir); error_str = "Invalid dirents\n"; goto on_error; } g_free (body); norm_parent_dir = normalize_utf8_path (tmp_parent_dir); r_parent_dir = format_dir_path (norm_parent_dir); GList *dirent_list = json_to_dirent_list (repo, r_parent_dir, dirents); if (!dirent_list) { error_str = "Invalid dirents\n"; goto on_error; } char *task_id = NULL; if (g_list_length(dirent_list) == 1) { task_id = zip_download_mgr_start_zip_task_v2 (seaf->zip_download_mgr, repo_id, "download-dir-link", user, dirent_list); } else { task_id = zip_download_mgr_start_zip_task_v2 (seaf->zip_download_mgr, repo_id, "download-multi-link", user, dirent_list); } if (!task_id) { g_list_free_full (dirent_list, (GDestroyNotify)seaf_dirent_free); error_str = "Internal server error\n"; error_code = EVHTP_RES_SERVERR; goto on_error; } evbuffer_add_printf (req->buffer_out, "{\"task_id\": \"%s\"}", task_id); evhtp_headers_add_header ( req->headers_out, evhtp_header_new("Content-Type", "application/json; charset=utf-8", 1, 1)); evhtp_send_reply (req, EVHTP_RES_OK); g_free (task_id); goto success; } if (can_use_cached_content (req)) { goto success; } parent_dir = seafile_share_link_info_get_parent_dir (info); norm_parent_dir = normalize_utf8_path (parent_dir); norm_path = normalize_utf8_path (path); r_parent_dir = format_dir_path (norm_parent_dir); 
fullpath = g_build_filename(r_parent_dir, norm_path, NULL); filename = g_path_get_basename (fullpath); file_id = seaf_fs_manager_get_seafile_id_by_path (seaf->fs_mgr, repo->store_id, repo->version, repo->root_id, fullpath, &error); if (!file_id) { error_str = "Invalid file_path\n"; if (error) g_clear_error(&error); goto on_error; } set_etag (req, file_id); byte_ranges = evhtp_kv_find (req->headers_in, "Range"); if (repo->encrypted) { key = seaf_passwd_manager_get_decrypt_key (seaf->passwd_mgr, repo_id, user); if (!key) { error_str = "Repo is encrypted. Please provide password to view it."; goto on_error; } } if (!seaf_fs_manager_object_exists (seaf->fs_mgr, repo->store_id, repo->version, file_id)) { error_str = "Invalid file id\n"; goto on_error; } if (!repo->encrypted && byte_ranges) { if (do_file_range (req, repo, file_id, filename, "download-link", byte_ranges, user) < 0) { error_str = "Internal server error\n"; error_code = EVHTP_RES_SERVERR; goto on_error; } } else if (do_file(req, repo, file_id, filename, "download-link", key, user) < 0) { error_str = "Internal server error\n"; error_code = EVHTP_RES_SERVERR; goto on_error; } success: g_strfreev (parts); g_free (tmp_parent_dir); g_free (dirents); g_free (user); g_free (norm_parent_dir); g_free (norm_path); g_free (r_parent_dir); g_free (fullpath); g_free (filename); g_free (file_id); if (repo != NULL) seaf_repo_unref (repo); if (key != NULL) g_object_unref (key); if (info) g_object_unref (info); return; on_error: g_strfreev (parts); g_free (tmp_parent_dir); g_free (dirents); g_free (user); g_free (norm_parent_dir); g_free (norm_path); g_free (r_parent_dir); g_free (fullpath); g_free (filename); g_free (file_id); if (repo != NULL) seaf_repo_unref (repo); if (key != NULL) g_object_unref (key); if (info != NULL) g_object_unref (info); evbuffer_add_printf(req->buffer_out, "%s\n", error_str); evhtp_send_reply(req, error_code); } */ static evhtp_res request_finish_cb (evhtp_request_t *req, void *arg) { RequestInfo 
*info = arg;
    struct timeval end, intv; /* NOTE(review): unused here — looks like a
                               * leftover from removed latency accounting;
                               * confirm before deleting. */

    /* Request teardown: drop the in-flight gauge and release the
     * bookkeeping allocated in access_headers_cb(). */
    seaf_metric_manager_in_flight_request_dec (seaf->metric_mgr);

    if (!info)
        return EVHTP_RES_OK;

    g_free (info->url_path);
    g_free (info);

    return EVHTP_RES_OK;
}

/*
 * Headers hook: record the URL path and start time for this request,
 * bump the in-flight request gauge, and register request_finish_cb()
 * to undo both when the request finishes.
 */
static evhtp_res
access_headers_cb (evhtp_request_t *req, evhtp_headers_t *hdr, void *arg)
{
    RequestInfo *info = NULL;

    info = g_new0 (RequestInfo, 1);
    info->url_path = g_strdup (req->uri->path->full);
    gettimeofday (&info->start, NULL);
    seaf_metric_manager_in_flight_request_inc (seaf->metric_mgr);
    evhtp_set_hook (&req->hooks, evhtp_hook_on_request_fini,
                    request_finish_cb, info);
    req->cbarg = info;

    return EVHTP_RES_OK;
}

/*
 * Register every file-access HTTP endpoint on the evhtp instance;
 * each route also gets the access_headers_cb metrics hook.
 * Always returns 0.
 */
int
access_file_init (evhtp_t *htp)
{
    evhtp_callback_t *cb;

    cb = evhtp_set_regex_cb (htp, "^/files/.*", access_cb, NULL);
    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, access_headers_cb, NULL);
    cb = evhtp_set_regex_cb (htp, "^/blks/.*", access_blks_cb, NULL);
    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, access_headers_cb, NULL);
    cb = evhtp_set_regex_cb (htp, "^/zip/.*", access_zip_cb, NULL);
    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, access_headers_cb, NULL);
    cb = evhtp_set_regex_cb (htp, "^/f/.*", access_link_cb, NULL);
    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, access_headers_cb, NULL);
    //evhtp_set_regex_cb (htp, "^/d/.*", access_dir_link_cb, NULL);
    cb = evhtp_set_regex_cb (htp,
                             "^/repos/[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}/files/.*",
                             access_v2_cb, NULL);
    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, access_headers_cb, NULL);

    return 0;
}

#endif


================================================
FILE: server/access-file.h
================================================
#ifndef ACCESS_FILE_H
#define ACCESS_FILE_H

#ifdef HAVE_EVHTP
int access_file_init (evhtp_t *htp);
#endif

#endif


================================================
FILE: server/change-set.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

#include "common.h"
#include
"seafile-session.h" #include "utils.h" #include "log.h" #include "change-set.h" struct _ChangeSetDir { int version; char dir_id[41]; /* A hash table of dirents for fast lookup and insertion. */ GHashTable *dents; }; typedef struct _ChangeSetDir ChangeSetDir; struct _ChangeSetDirent { guint32 mode; char id[41]; char *name; gint64 mtime; char *modifier; gint64 size; /* Only used for directory. Most of time this is NULL * unless we change the subdir too. */ ChangeSetDir *subdir; }; typedef struct _ChangeSetDirent ChangeSetDirent; /* Change set dirent. */ static ChangeSetDirent * changeset_dirent_new (const char *id, guint32 mode, const char *name, gint64 mtime, const char *modifier, gint64 size) { ChangeSetDirent *dent = g_new0 (ChangeSetDirent, 1); dent->mode = mode; memcpy (dent->id, id, 40); dent->name = g_strdup(name); dent->mtime = mtime; dent->modifier = g_strdup(modifier); dent->size = size; return dent; } static ChangeSetDirent * seaf_dirent_to_changeset_dirent (SeafDirent *seaf_dent) { return changeset_dirent_new (seaf_dent->id, seaf_dent->mode, seaf_dent->name, seaf_dent->mtime, seaf_dent->modifier, seaf_dent->size); } static SeafDirent * changeset_dirent_to_seaf_dirent (int version, ChangeSetDirent *dent) { return seaf_dirent_new (version, dent->id, dent->mode, dent->name, dent->mtime, dent->modifier, dent->size); } static void changeset_dir_free (ChangeSetDir *dir); static void changeset_dirent_free (ChangeSetDirent *dent) { if (!dent) return; g_free (dent->name); g_free (dent->modifier); /* Recursively free subdir. */ if (dent->subdir) changeset_dir_free (dent->subdir); g_free (dent); } /* Change set dir. 
*/

/* Insert @dent into @dir, keyed by an owned copy of its name. */
static void
add_dent_to_dir (ChangeSetDir *dir, ChangeSetDirent *dent)
{
    g_hash_table_insert (dir->dents,
                         g_strdup(dent->name),
                         dent);
}

/* Remove the entry named @dname from @dir WITHOUT freeing the dirent
 * value (steal); only the owned key string is released. */
static void
remove_dent_from_dir (ChangeSetDir *dir, const char *dname)
{
    char *key;

    if (g_hash_table_lookup_extended (dir->dents, dname,
                                      (gpointer*)&key, NULL)) {
        g_hash_table_steal (dir->dents, dname);
        g_free (key);
    }
}

/* Build a ChangeSetDir from a SeafDirent list; @id may be NULL when
 * the dir id has not been computed yet. */
static ChangeSetDir *
changeset_dir_new (int version, const char *id, GList *dirents)
{
    ChangeSetDir *dir = g_new0 (ChangeSetDir, 1);
    GList *ptr;
    SeafDirent *dent;
    ChangeSetDirent *changeset_dent;

    dir->version = version;
    if (id)
        memcpy (dir->dir_id, id, 40);
    /* Table owns both the key copies and the dirent values. */
    dir->dents = g_hash_table_new_full (g_str_hash, g_str_equal,
                                        g_free,
                                        (GDestroyNotify)changeset_dirent_free);
    for (ptr = dirents; ptr; ptr = ptr->next) {
        dent = ptr->data;
        changeset_dent = seaf_dirent_to_changeset_dirent(dent);
        add_dent_to_dir (dir, changeset_dent);
    }

    return dir;
}

static void
changeset_dir_free (ChangeSetDir *dir)
{
    if (!dir)
        return;
    g_hash_table_destroy (dir->dents);
    g_free (dir);
}

static ChangeSetDir *
seaf_dir_to_changeset_dir (SeafDir *seaf_dir)
{
    return changeset_dir_new (seaf_dir->version,
                              seaf_dir->dir_id,
                              seaf_dir->entries);
}

/* Comparator for g_list_sort: descending by name (note the operands
 * are intentionally reversed). */
static gint
compare_dents (gconstpointer a, gconstpointer b)
{
    const SeafDirent *denta = a, *dentb = b;

    return strcmp(dentb->name, denta->name);
}

/* Materialize a ChangeSetDir back into a SeafDir. Entries are sorted
 * in descending name order; the dir id is recomputed by seaf_dir_new(). */
static SeafDir *
changeset_dir_to_seaf_dir (ChangeSetDir *dir)
{
    GList *dents = NULL, *seaf_dents = NULL;
    GList *ptr;
    ChangeSetDirent *dent;
    SeafDirent *seaf_dent;
    SeafDir *seaf_dir;

    dents = g_hash_table_get_values (dir->dents);
    for (ptr = dents; ptr; ptr = ptr->next) {
        dent = ptr->data;
        seaf_dent = changeset_dirent_to_seaf_dirent (dir->version, dent);
        seaf_dents = g_list_prepend (seaf_dents, seaf_dent);
    }

    /* Sort it in descending order. */
    seaf_dents = g_list_sort (seaf_dents, compare_dents);

    /* seaf_dir_new() computes the dir id. */
    seaf_dir = seaf_dir_new (NULL, seaf_dents, dir->version);

    g_list_free (dents);

    return seaf_dir;
}

/* Change set.
*/

/* Create a changeset for @repo_id rooted at the SeafDir @dir. Returns
 * NULL when the root dir cannot be converted. */
ChangeSet *
changeset_new (const char *repo_id, SeafDir *dir)
{
    ChangeSetDir *changeset_dir = NULL;
    ChangeSet *changeset = NULL;

    changeset_dir = seaf_dir_to_changeset_dir (dir);
    if (!changeset_dir)
        goto out;

    changeset = g_new0 (ChangeSet, 1);
    memcpy (changeset->repo_id, repo_id, 36);
    changeset->tree_root = changeset_dir;

out:
    return changeset;
}

void
changeset_free (ChangeSet *changeset)
{
    if (!changeset)
        return;

    changeset_dir_free (changeset->tree_root);
    g_free (changeset);
}

/*
 * Unlink the entry at @path from the in-memory tree, loading
 * intermediate directories from the fs store on demand. Returns the
 * detached dirent (caller frees) or NULL when the path doesn't exist;
 * *parent_empty is set when the removal left the containing dir empty.
 */
static ChangeSetDirent *
delete_from_tree (ChangeSet *changeset,
                  const char *path,
                  gboolean *parent_empty)
{
    char *repo_id = changeset->repo_id;
    ChangeSetDir *root = changeset->tree_root;
    char **parts, *dname;
    int n, i;
    ChangeSetDir *dir;
    ChangeSetDirent *dent, *ret = NULL;
    ChangeSetDirent *parent_dent = NULL;
    SeafDir *seaf_dir;

    *parent_empty = FALSE;

    parts = g_strsplit (path, "/", 0);
    n = g_strv_length(parts);
    dir = root;
    for (i = 0; i < n; i++) {
        dname = parts[i];
        dent = g_hash_table_lookup (dir->dents, dname);
        if (!dent)
            break;

        if (S_ISDIR(dent->mode)) {
            if (i == (n-1)) {
                /* Remove from hash table without freeing dent. */
                remove_dent_from_dir (dir, dname);
                if (g_hash_table_size (dir->dents) == 0)
                    *parent_empty = TRUE;
                ret = dent;
                // update parent dir mtime when delete dirs locally.
                if (parent_dent) {
                    parent_dent->mtime = time (NULL);
                }
                break;
            }

            /* Intermediate dir: lazily load its entries. */
            if (!dent->subdir) {
                seaf_dir = seaf_fs_manager_get_seafdir(seaf->fs_mgr,
                                                       repo_id,
                                                       root->version,
                                                       dent->id);
                if (!seaf_dir) {
                    seaf_warning ("Failed to load seafdir %s:%s\n",
                                  repo_id, dent->id);
                    break;
                }
                dent->subdir = seaf_dir_to_changeset_dir (seaf_dir);
                seaf_dir_free (seaf_dir);
            }
            dir = dent->subdir;
            parent_dent = dent;
        } else if (S_ISREG(dent->mode)) {
            /* A regular file can only match as the last path component. */
            if (i == (n-1)) {
                /* Remove from hash table without freeing dent. */
                remove_dent_from_dir (dir, dname);
                if (g_hash_table_size (dir->dents) == 0)
                    *parent_empty = TRUE;
                ret = dent;
                // update parent dir mtime when delete files locally.
                if (parent_dent) {
                    parent_dent->mtime = time (NULL);
                }
                break;
            }
        }
    }
    g_strfreev (parts);

    return ret;
}

/* Delete @path from the tree; when @remove_parent is set, walk upward
 * removing parents that became empty, never going above @top_dir.
 * *mode (if non-NULL) receives the mode of the removed entry. */
static void
remove_from_changeset_recursive (ChangeSet *changeset,
                                 const char *path,
                                 gboolean remove_parent,
                                 const char *top_dir,
                                 int *mode)
{
    ChangeSetDirent *dent;
    gboolean parent_empty = FALSE;

    dent = delete_from_tree (changeset, path, &parent_empty);
    if (mode && dent)
        *mode = dent->mode;
    changeset_dirent_free (dent);

    if (remove_parent && parent_empty) {
        char *parent = g_strdup(path);
        char *slash = strrchr (parent, '/');
        if (slash) {
            *slash = '\0';
            if (strlen(parent) >= strlen(top_dir)) {
                /* Recursively remove parent dirs. */
                remove_from_changeset_recursive (changeset,
                                                 parent,
                                                 remove_parent,
                                                 top_dir,
                                                 mode);
            }
        }
        g_free (parent);
    }
}

/* Public wrapper around the recursive implementation above. */
void
remove_from_changeset (ChangeSet *changeset,
                       const char *path,
                       gboolean remove_parent,
                       const char *top_dir,
                       int *mode)
{
    remove_from_changeset_recursive (changeset, path, remove_parent,
                                     top_dir, mode);
}

/* Post-order walk: commit children first so this dir's dirents carry
 * the updated child ids, then save this dir object if it isn't stored
 * yet. Returns the dir's new id (caller frees) or NULL on failure. */
static char *
commit_tree_recursive (const char *repo_id, ChangeSetDir *dir)
{
    ChangeSetDirent *dent;
    GHashTableIter iter;
    gpointer key, value;
    char *new_id;
    SeafDir *seaf_dir;
    char *ret = NULL;

    g_hash_table_iter_init (&iter, dir->dents);
    while (g_hash_table_iter_next (&iter, &key, &value)) {
        dent = value;
        if (dent->subdir) {
            new_id = commit_tree_recursive (repo_id, dent->subdir);
            if (!new_id)
                return NULL;
            memcpy (dent->id, new_id, 40);
            g_free (new_id);
        }
    }

    seaf_dir = changeset_dir_to_seaf_dir (dir);

    memcpy (dir->dir_id, seaf_dir->dir_id, 40);

    if (!seaf_fs_manager_object_exists (seaf->fs_mgr,
                                        repo_id, dir->version,
                                        seaf_dir->dir_id)) {
        if (seaf_dir_save (seaf->fs_mgr, repo_id, dir->version,
                           seaf_dir) < 0) {
            seaf_warning ("Failed to save dir object %s to repo %s.\n",
                          seaf_dir->dir_id, repo_id);
            goto out;
        }
    }

    ret = g_strdup(seaf_dir->dir_id);

out:
    seaf_dir_free (seaf_dir);
    return ret;
}

/*
 * This function does two things:
 * - calculate dir id from bottom up;
 * - create and save seaf dir objects.
* It returns root dir id of the new commit.
 */
char *
commit_tree_from_changeset (ChangeSet *changeset)
{
    char *root_id = commit_tree_recursive (changeset->repo_id,
                                           changeset->tree_root);

    return root_id;
}


================================================
FILE: server/change-set.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

#ifndef SEAF_CHANGE_SET_H
#define SEAF_CHANGE_SET_H

#include /* NOTE(review): header name lost in extraction */
#include "utils.h"

struct _ChangeSetDir;

struct _ChangeSet {
    char repo_id[37];
    /* A partial tree for all changed directories. */
    struct _ChangeSetDir *tree_root;
};
typedef struct _ChangeSet ChangeSet;

ChangeSet *
changeset_new (const char *repo_id, SeafDir *dir);

void
changeset_free (ChangeSet *changeset);

/* @remove_parent: remove the parent dir when it becomes empty. */
void
remove_from_changeset (ChangeSet *changeset,
                       const char *path,
                       gboolean remove_parent,
                       const char *top_dir,
                       int *mode);

char *
commit_tree_from_changeset (ChangeSet *changeset);

#endif


================================================
FILE: server/copy-mgr.c
================================================
#include "common.h"
#include "log.h"
#include /* NOTE(review): header name lost in extraction */
#include "seafile-session.h"
#include "seafile-object.h"
#include "seafile-error.h"
#include "copy-mgr.h"
#include "utils.h"
#include "log.h" /* NOTE(review): duplicate include of log.h */

#define DEFAULT_MAX_THREADS 5

/* Private state: the task table is guarded by @lock; jobs run on a
 * worker pool. */
struct _SeafCopyManagerPriv {
    GHashTable *copy_tasks;
    pthread_mutex_t lock;
    CcnetJobManager *job_mgr;
};

static void
copy_task_free (CopyTask *task)
{
    if (!task)
        return;
    g_free (task->failed_reason);
    g_free (task);
}

/* Create the copy manager. Size limits come from the [web_copy]
 * config section; max_size is configured in MB. */
SeafCopyManager *
seaf_copy_manager_new (struct _SeafileSession *session)
{
    SeafCopyManager *mgr = g_new0 (SeafCopyManager, 1);

    mgr->session = session;
    mgr->priv = g_new0 (struct _SeafCopyManagerPriv, 1);
    mgr->priv->copy_tasks = g_hash_table_new_full (g_str_hash, g_str_equal,
                                                   g_free,
                                                   (GDestroyNotify)copy_task_free);
    pthread_mutex_init (&mgr->priv->lock, NULL);

    mgr->max_files = g_key_file_get_int64 (session->config,
                                           "web_copy", "max_files", NULL);
    mgr->max_size = g_key_file_get_int64 (session->config,
                                          "web_copy", "max_size", NULL);
    /* size is given in MB */
    mgr->max_size <<= 20;

    return mgr;
}

int
seaf_copy_manager_start (SeafCopyManager *mgr)
{
    mgr->priv->job_mgr = ccnet_job_manager_new (DEFAULT_MAX_THREADS);
    return 1;
}

/* Snapshot @task_id's progress into a new SeafileCopyTask object
 * (caller owns). Finished/failed/canceled tasks are removed from the
 * table as a side effect of the query. Returns NULL for unknown ids. */
SeafileCopyTask *
seaf_copy_manager_get_task (SeafCopyManager *mgr,
                            const char *task_id)
{
    SeafCopyManagerPriv *priv = mgr->priv;
    CopyTask *task;
    SeafileCopyTask *t = NULL;

    pthread_mutex_lock (&priv->lock);

    task = g_hash_table_lookup (priv->copy_tasks, task_id);
    if (task) {
        t = seafile_copy_task_new ();
        g_object_set (t, "done", task->done,
                      "total", task->total,
                      "canceled", task->canceled,
                      "failed", task->failed,
                      "failed_reason", task->failed_reason,
                      "successful", task->successful,
                      NULL);
        if (task->canceled || task->failed || task->successful)
            g_hash_table_remove(priv->copy_tasks, task_id);
    }

    pthread_mutex_unlock (&priv->lock);

    return t;
}

/* All string members are owned copies; released in copy_done(). */
struct CopyThreadData {
    SeafCopyManager *mgr;
    char src_repo_id[37];
    char *src_path;
    char *src_filename;
    char dst_repo_id[37];
    char *dst_path;
    char *dst_filename;
    int replace;
    char *modifier;
    CopyTask *task;
    CopyTaskFunc func;
};
typedef struct CopyThreadData CopyThreadData;

/* Worker-thread entry point: run the actual copy function. */
static void *
copy_thread (void *vdata)
{
    CopyThreadData *data = vdata;

    data->func (data->src_repo_id,
                data->src_path,
                data->src_filename,
                data->dst_repo_id,
                data->dst_path,
                data->dst_filename,
                data->replace,
                data->modifier,
                data->task);

    return vdata;
}

/* Completion callback: release the thread data. */
static void
copy_done (void *vdata)
{
    CopyThreadData *data = vdata;

    g_free (data->src_path);
    g_free (data->src_filename);
    g_free (data->dst_path);
    g_free (data->dst_filename);
    g_free (data->modifier);
    g_free (data);
}

/* Schedule an asynchronous copy. When @need_progress, a progress task
 * is registered and its uuid returned (caller frees); otherwise NULL.
 * @function performs the real work on a worker thread. */
char *
seaf_copy_manager_add_task (SeafCopyManager *mgr,
                            const char *src_repo_id,
                            const char *src_path,
                            const char *src_filename,
                            const char *dst_repo_id,
                            const char *dst_path,
                            const char *dst_filename,
                            int replace,
                            const char *modifier,
                            CopyTaskFunc function,
                            gboolean need_progress)
{
    SeafCopyManagerPriv *priv = mgr->priv;
    char *task_id = NULL;
    CopyTask *task = NULL;
    struct CopyThreadData *data;

    if (need_progress) {
        task_id = gen_uuid();
        task = g_new0 (CopyTask, 1);
        memcpy (task->task_id, task_id, 36);

        pthread_mutex_lock (&priv->lock);
        g_hash_table_insert (priv->copy_tasks, g_strdup(task_id), task);
        pthread_mutex_unlock (&priv->lock);
    }

    data = g_new0 (CopyThreadData, 1);
    data->mgr = mgr;
    memcpy (data->src_repo_id, src_repo_id, 36);
    data->src_path = g_strdup(src_path);
    data->src_filename = g_strdup(src_filename);
    memcpy (data->dst_repo_id, dst_repo_id, 36);
    data->dst_path = g_strdup(dst_path);
    data->dst_filename = g_strdup(dst_filename);
    data->replace = replace;
    data->modifier = g_strdup(modifier);
    data->task = task;
    data->func = function;

    ccnet_job_manager_schedule_job (mgr->priv->job_mgr,
                                    copy_thread,
                                    copy_done,
                                    data);

    return task_id;
}

/* Request cancellation of a running task. Returns -1 when the task has
 * already finished, 0 otherwise (including unknown ids).
 * NOTE(review): @task is dereferenced after the lock is released; a
 * concurrent get_task() removing a finished task would free it —
 * verify the intended locking discipline. */
int
seaf_copy_manager_cancel_task (SeafCopyManager *mgr, const char *task_id)
{
    SeafCopyManagerPriv *priv = mgr->priv;
    CopyTask *task;

    pthread_mutex_lock (&priv->lock);
    task = g_hash_table_lookup (priv->copy_tasks, task_id);
    pthread_mutex_unlock (&priv->lock);

    if (task) {
        if (task->canceled || task->failed || task->successful)
            return -1;
        g_atomic_int_set (&task->canceled, 1);
    }
    return 0;
}


================================================
FILE: server/copy-mgr.h
================================================
#ifndef COPY_MGR_H
#define COPY_MGR_H

#include /* NOTE(review): header name lost in extraction */

#define COPY_ERR_INTERNAL "Internal error when copy or move"
#define COPY_ERR_BAD_ARG "Invalid arguments"
#define COPY_ERR_TOO_MANY_FILES "Too many files"
#define COPY_ERR_SIZE_TOO_LARGE "Folder or file size is too large"
#define COPY_ERR_QUOTA_IS_FULL "Quota is full"

struct _SeafileSession;
struct _SeafCopyManagerPriv;
struct _SeafileCopyTask;

struct _SeafCopyManager {
    struct _SeafileSession *session;
    struct _SeafCopyManagerPriv *priv;
    gint64 max_files;
    gint64 max_size;
};
typedef struct _SeafCopyManager SeafCopyManager;

typedef struct
_SeafCopyManagerPriv SeafCopyManagerPriv;

/* Progress/result record for one asynchronous copy task. */
struct CopyTask {
    char task_id[37];
    gint64 done;
    gint64 total;
    gint canceled;
    gboolean failed;
    char *failed_reason;
    gboolean successful;
};
typedef struct CopyTask CopyTask;

SeafCopyManager *
seaf_copy_manager_new (struct _SeafileSession *session);

int
seaf_copy_manager_start (SeafCopyManager *mgr);

/* Worker callback signature: (src_repo, src_path, src_name, dst_repo,
 * dst_path, dst_name, replace, modifier, task). */
typedef int (*CopyTaskFunc) (const char *,
                             const char *,
                             const char *,
                             const char *,
                             const char *,
                             const char *,
                             int,
                             const char *,
                             CopyTask *);

char *
seaf_copy_manager_add_task (SeafCopyManager *mgr,
                            const char *src_repo_id,
                            const char *src_path,
                            const char *src_filename,
                            const char *dst_repo_id,
                            const char *dst_path,
                            const char *dst_filename,
                            int replace,
                            const char *modifier,
                            CopyTaskFunc function,
                            gboolean need_progress);

struct _SeafileCopyTask *
seaf_copy_manager_get_task (SeafCopyManager *mgr,
                            const char * id);

int
seaf_copy_manager_cancel_task (SeafCopyManager *mgr, const char *task_id);

#endif


================================================
FILE: server/fileserver-config.c
================================================
#include "common.h"
#include /* NOTE(review): header name lost in extraction */
#include "fileserver-config.h"

/* The config section used to be [httpserver]; fall back to it when
 * the newer [fileserver] group is absent. */
const char *OLD_GROUP_NAME = "httpserver";
const char *GROUP_NAME = "fileserver";

static const char *
get_group_name(GKeyFile *config)
{
    return g_key_file_has_group (config, GROUP_NAME) ?
        GROUP_NAME : OLD_GROUP_NAME;
}

int
fileserver_config_get_integer(GKeyFile *config, char *key, GError **error)
{
    const char *group = get_group_name(config);

    return g_key_file_get_integer (config, group, key, error);
}

int
fileserver_config_get_int64(GKeyFile *config, char *key, GError **error)
{
    const char *group = get_group_name(config);

    return g_key_file_get_int64 (config, group, key, error);
}

char *
fileserver_config_get_string(GKeyFile *config, char *key, GError **error)
{
    const char *group = get_group_name(config);

    return g_key_file_get_string (config, group, key, error);
}

gboolean
fileserver_config_get_boolean(GKeyFile *config, char *key, GError **error)
{
    const char *group = get_group_name(config);

    return g_key_file_get_boolean (config, group, key, error);
}


================================================
FILE: server/fileserver-config.h
================================================
#ifndef SEAFILE_FILESERVER_CONFIG_H
#define SEAFILE_FILESERVER_CONFIG_H

struct GKeyFile;

int
fileserver_config_get_integer(GKeyFile *config, char *key, GError **error);

char *
fileserver_config_get_string(GKeyFile *config, char *key, GError **error);

int
fileserver_config_get_int64(GKeyFile *config, char *key, GError **error);

gboolean
fileserver_config_get_boolean(GKeyFile *config, char *key, GError **error);

#endif // SEAFILE_FILESERVER_CONFIG_H


================================================
FILE: server/gc/Makefile.am
================================================
AM_CFLAGS = -DPKGDATADIR=\"$(pkgdatadir)\" \
	-DPACKAGE_DATA_DIR=\""$(pkgdatadir)"\" \
	-DSEAFILE_SERVER \
	-I$(top_srcdir)/include \
	-I$(top_srcdir)/lib \
	-I$(top_builddir)/lib \
	-I$(top_srcdir)/common \
	@SEARPC_CFLAGS@ \
	@GLIB2_CFLAGS@ \
	@MSVC_CFLAGS@ \
	@MYSQL_CFLAGS@ \
	-Wall

bin_PROGRAMS = seafserv-gc seaf-fsck

noinst_HEADERS = \
	seafile-session.h \
	repo-mgr.h \
	verify.h \
	fsck.h \
	gc-core.h

common_sources = \
	seafile-session.c \
	repo-mgr.c \
	../../common/seaf-db.c \
	../../common/branch-mgr.c \
../../common/fs-mgr.c \ ../../common/block-mgr.c \ ../../common/block-backend.c \ ../../common/block-backend-fs.c \ ../../common/commit-mgr.c \ ../../common/log.c \ ../../common/seaf-utils.c \ ../../common/obj-store.c \ ../../common/obj-backend-fs.c \ ../../common/seafile-crypt.c \ ../../common/password-hash.c \ ../../common/config-mgr.c seafserv_gc_SOURCES = \ seafserv-gc.c \ verify.c \ gc-core.c \ $(common_sources) seafserv_gc_LDADD = $(top_builddir)/common/cdc/libcdc.la \ $(top_builddir)/lib/libseafile_common.la \ @GLIB2_LIBS@ @GOBJECT_LIBS@ @SSL_LIBS@ @LIB_RT@ @LIB_UUID@ -lsqlite3 @LIBEVENT_LIBS@ \ @SEARPC_LIBS@ @JANSSON_LIBS@ ${LIB_WS32} @ZLIB_LIBS@ \ @MYSQL_LIBS@ -lsqlite3 @ARGON2_LIBS@ seaf_fsck_SOURCES = \ seaf-fsck.c \ fsck.c \ $(common_sources) seaf_fsck_LDADD = $(top_builddir)/common/cdc/libcdc.la \ $(top_builddir)/lib/libseafile_common.la \ @GLIB2_LIBS@ @GOBJECT_LIBS@ @SSL_LIBS@ @LIB_RT@ @LIB_UUID@ -lsqlite3 @LIBEVENT_LIBS@ \ @SEARPC_LIBS@ @JANSSON_LIBS@ ${LIB_WS32} @ZLIB_LIBS@ \ @MYSQL_LIBS@ -lsqlite3 @ARGON2_LIBS@ ================================================ FILE: server/gc/fsck.c ================================================ #include "common.h" #include #include "seafile-session.h" #include "seaf-utils.h" #include "log.h" #include "utils.h" #include "fsck.h" typedef struct FsckData { FsckOptions *options; SeafRepo *repo; GHashTable *existing_blocks; GList *repaired_files; GList *repaired_folders; gint64 truncate_time; } FsckData; typedef struct CheckAndRecoverRepoObj { char *repo_id; FsckOptions *options; } CheckAndRecoverRepoObj; typedef enum VerifyType { VERIFY_FILE, VERIFY_DIR } VerifyType; static gboolean fsck_verify_seafobj (const char *store_id, int version, const char *obj_id, gboolean *io_error, VerifyType type, gboolean repair) { gboolean valid = TRUE; valid = seaf_fs_manager_object_exists (seaf->fs_mgr, store_id, version, obj_id); if (!valid) { if (type == VERIFY_FILE) { seaf_message ("File %s is missing.\n", obj_id); } else if (type 
== VERIFY_DIR) {
            seaf_message ("Dir %s is missing.\n", obj_id);
        }
        /* Object is missing entirely; nothing further to verify. */
        return valid;
    }

    if (type == VERIFY_FILE) {
        valid = seaf_fs_manager_verify_seafile (seaf->fs_mgr, store_id, version,
                                                obj_id, TRUE, io_error);
        /* Only log here when repairing; in check-only mode the caller
         * prints its own, more detailed diagnostic. */
        if (!valid && !*io_error && repair) {
            seaf_message ("File %s is damaged.\n", obj_id);
        }
    } else if (type == VERIFY_DIR) {
        valid = seaf_fs_manager_verify_seafdir (seaf->fs_mgr, store_id, version,
                                                obj_id, TRUE, io_error);
        if (!valid && !*io_error && repair) {
            seaf_message ("Dir %s is damaged.\n", obj_id);
        }
    }

    return valid;
}

/*
 * Verify that every block referenced by file object @file_id exists and,
 * when options->check_integrity is set, that its content passes
 * verification.  Blocks already seen for this repo are skipped via
 * fsck_data->existing_blocks (block_id -> dummy).
 *
 * Returns 0 when all blocks are OK, -1 when the file object can't be
 * loaded or any block is missing/damaged.  *io_error is set on I/O
 * failure, except when corruption was already detected earlier (see the
 * inline comment below).
 */
static int
check_blocks (const char *file_id, FsckData *fsck_data, gboolean *io_error)
{
    Seafile *seafile;
    int i;
    char *block_id;
    int ret = 0;
    int dummy;
    gboolean ok = TRUE;
    SeafRepo *repo = fsck_data->repo;
    const char *store_id = repo->store_id;
    int version = repo->version;

    seafile = seaf_fs_manager_get_seafile (seaf->fs_mgr, store_id, version, file_id);
    if (!seafile) {
        seaf_warning ("Failed to get seafile: %s/%s\n", store_id, file_id);
        return -1;
    }

    for (i = 0; i < seafile->n_blocks; ++i) {
        block_id = seafile->blk_sha1s[i];

        /* Each block is verified at most once per repo run. */
        if (g_hash_table_lookup (fsck_data->existing_blocks, block_id))
            continue;

        if (!seaf_block_manager_block_exists (seaf->block_mgr, store_id,
                                              version, block_id)) {
            seaf_warning ("Repo[%.8s] block %s:%s is missing.\n",
                          repo->id, store_id, block_id);
            ret = -1;
            continue;
        }

        if (fsck_data->options->check_integrity) {
            // check block integrity, if not remove it
            ok = seaf_block_manager_verify_block (seaf->block_mgr, store_id,
                                                  version, block_id, io_error);
            if (!ok) {
                if (*io_error) {
                    /* If corruption was already found before the I/O error,
                     * report corruption rather than the I/O failure. */
                    if (ret < 0) {
                        *io_error = FALSE;
                    }
                    ret = -1;
                    break;
                } else {
                    if (fsck_data->options->repair) {
                        seaf_message ("Repo[%.8s] block %s is damaged, remove it.\n",
                                      repo->id, block_id);
                        seaf_block_manager_remove_block (seaf->block_mgr, store_id,
                                                         version, block_id);
                    } else {
                        seaf_message ("Repo[%.8s] block %s is damaged.\n",
                                      repo->id, block_id);
                    }
                    ret = -1;
                }
            }
        }

        g_hash_table_insert (fsck_data->existing_blocks, g_strdup(block_id), &dummy);
    }

    seafile_unref
(seafile);

    return ret;
}

/* State carried while walking commit history to locate the commit that
 * last modified a size-mismatched file. */
typedef struct {
    SeafRepo *repo;
    const char *file_path;      /* repo-relative path of the file */
    const char *file_id;        /* current object id of the damaged file */
    char *commit_id;            /* newest commit still containing file_id */
    gboolean found;
    gboolean traversed_head;
    GHashTable *visited_commits;
    gint64 truncate_time;       /* history truncation boundary; see below */
} CheckFileSizeData;

/*
 * Commit-traversal callback.  Walks back through history until the file
 * at data->file_path no longer resolves to object id data->file_id; at
 * that point data->commit_id (recorded on the previous iteration) is the
 * commit that introduced the current file content.
 */
static gboolean
get_file_updated_commit (SeafCommit *commit, void *vdata, gboolean *stop)
{
    CheckFileSizeData *data = vdata;
    SeafRepo *repo = data->repo;
    int ret;  /* NOTE(review): unused local */

    if (data->found) {
        *stop = TRUE;
        return TRUE;
    }

    /* Commits can be reachable through several parents; visit once. */
    if (g_hash_table_lookup (data->visited_commits, commit->commit_id)) {
        *stop = TRUE;
        return TRUE;
    }
    int dummy;
    g_hash_table_replace (data->visited_commits, g_strdup (commit->commit_id), &dummy);

    if (data->truncate_time == 0) {
        /* No history kept: stop after the head commit. */
        *stop = TRUE;
    } else if (data->truncate_time > 0 &&
               (gint64)(commit->ctime) < data->truncate_time &&
               data->traversed_head) {
        /* Stop at (but still process) the first commit older than the
         * truncation time. */
        *stop = TRUE;
    }

    if (!data->traversed_head)
        data->traversed_head = TRUE;

    char *file_id;
    guint32 mode;
    GError *error = NULL;
    file_id = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr, repo->store_id,
                                              repo->version, commit->root_id,
                                              data->file_path, &mode, &error);
    if (error) {
        /* The path may not exist in this commit; a NULL file_id then
         * compares as "changed" below. */
        g_clear_error (&error);
    }

    // Compare the file_id with the current file.
    // If the file_id has changed, then the previous commit is the commit where the file was modified.
if (g_strcmp0 (data->file_id, file_id) != 0) {
        /* Object id differs here, so the commit recorded on the previous
         * (newer) iteration is the one that set the current content. */
        data->found = TRUE;
        *stop = TRUE;
    } else {
        g_free (data->commit_id);
        data->commit_id = g_strdup(commit->commit_id);
    }
    g_free (file_id);

    return TRUE;
}

/*
 * If the size recorded in @dent differs from the size stored in the file
 * object, log a warning naming (when it can be determined) the commit in
 * which the file was uploaded.  Diagnostic only: returns 0 unless the
 * file object itself cannot be loaded (-1); nothing is modified.
 */
static int
check_file_size (FsckData *fsck_data, SeafDirent *dent, const char *path)
{
    int ret = 0;
    SeafRepo *repo = fsck_data->repo;
    const char *store_id = repo->store_id;
    int version = repo->version;
    Seafile *seafile = NULL;

    seafile = seaf_fs_manager_get_seafile (seaf->fs_mgr, store_id, version, dent->id);
    if (!seafile) {
        seaf_warning ("Failed to get seafile: %s/%s\n", store_id, dent->id);
        return -1;
    }

    if (seafile->file_size == dent->size) {
        goto out;
    }

    CheckFileSizeData data;
    memset (&data, 0, sizeof(CheckFileSizeData));
    data.repo = repo;
    data.file_path = path;
    data.file_id = dent->id;
    data.commit_id = g_strdup (repo->head->commit_id);
    data.visited_commits = g_hash_table_new_full (g_str_hash, g_str_equal,
                                                  g_free, NULL);
    data.truncate_time = fsck_data->truncate_time;

    // Get the commit that added or modified this file.
    seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr, repo->id, version,
                                              repo->head->commit_id,
                                              get_file_updated_commit, &data, FALSE);

    SeafCommit *commit = NULL;
    if (data.found) {
        commit = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id,
                                                 repo->version, data.commit_id);
    }

    if (commit) {
        char time_buf[64];
        strftime (time_buf, 64, "%Y-%m-%d %H:%M:%S",
                  localtime((time_t *)&commit->ctime));
        seaf_warning ("Repo[%s] file %s is damaged, as its size does not match the expected value. It was uploaded via %s (commit id is %s), commit desc is %s, commit time is %s.\n",
                      repo->id, path, commit->client_version, commit->commit_id,
                      commit->desc, time_buf);
    } else {
        seaf_warning ("Repo[%s] file %s is damaged, as its size does not match the expected value.\n",
                      repo->id, path);
    }

    if (data.commit_id)
        g_free (data.commit_id);
    g_hash_table_destroy (data.visited_commits);
    if (commit)
        seaf_commit_unref (commit);

out:
    seafile_unref (seafile);
    return ret;
}

/*
 * Recursively verify directory object @id (mounted at repo path
 * @parent_dir).  A damaged file entry is replaced by the empty file
 * object (EMPTY_SHA1) and a damaged subdirectory by the empty dir
 * object; repaired paths are collected in fsck_data->repaired_files /
 * repaired_folders.
 *
 * Returns the (possibly new) directory object id — caller frees — or
 * NULL on I/O error / OOM.  When repairing, a rebuilt directory is saved
 * back to the fs store.
 */
static char*
fsck_check_dir_recursive (const char *id, const char *parent_dir, FsckData *fsck_data)
{
    SeafDir *dir;
    SeafDir *new_dir;
    GList *p;
    SeafDirent *seaf_dent;
    char *dir_id = NULL;
    char *path = NULL;
    gboolean io_error = FALSE;
    SeafFSManager *mgr = seaf->fs_mgr;
    char *store_id = fsck_data->repo->store_id;
    int version = fsck_data->repo->version;
    gboolean is_corrupted = FALSE;

    dir = seaf_fs_manager_get_seafdir (mgr, store_id, version, id);
    if (!dir) {
        goto out;
    }

    for (p = dir->entries; p; p = p->next) {
        seaf_dent = p->data;
        io_error = FALSE;

        if (S_ISREG(seaf_dent->mode)) {
            path = g_strdup_printf ("%s%s", parent_dir, seaf_dent->name);
            if (!path) {
                seaf_warning ("Out of memory, stop to run fsck for repo %.8s.\n",
                              fsck_data->repo->id);
                goto out;
            }

            if (!fsck_verify_seafobj (store_id, version, seaf_dent->id,
                                      &io_error, VERIFY_FILE,
                                      fsck_data->options->repair)) {
                if (io_error) {
                    g_free (path);
                    goto out;
                }
                is_corrupted = TRUE;
                if (fsck_data->options->repair) {
                    seaf_message ("Repo[%.8s] file %s(%.8s) is damaged, recreate an empty file.\n",
                                  fsck_data->repo->id, path, seaf_dent->id);
                } else {
                    seaf_message ("Repo[%.8s] file %s(%.8s) is damaged.\n",
                                  fsck_data->repo->id, path, seaf_dent->id);
                }
                // file damaged, set it empty
                memcpy (seaf_dent->id, EMPTY_SHA1, 40);
                seaf_dent->mtime = (gint64)time(NULL);
                seaf_dent->size = 0;
                fsck_data->repaired_files =
                    g_list_prepend (fsck_data->repaired_files, g_strdup(path));
            } else {
                if (check_blocks (seaf_dent->id, fsck_data, &io_error) < 0) {
                    if (io_error) {
                        seaf_message ("Failed to check blocks for repo[%.8s] file %s(%.8s).\n",
                                      fsck_data->repo->id, path, seaf_dent->id);
                        g_free (path);
                        goto out;
                    }
                    is_corrupted = TRUE;
                    if (fsck_data->options->repair) {
                        seaf_message ("Repo[%.8s] file %s(%.8s) is damaged, recreate an empty file.\n",
                                      fsck_data->repo->id, path, seaf_dent->id);
                    } else {
                        seaf_message ("Repo[%.8s] file %s(%.8s) is damaged.\n",
                                      fsck_data->repo->id, path, seaf_dent->id);
                    }
                    // file damaged, set it empty
                    memcpy (seaf_dent->id, EMPTY_SHA1, 40);
                    seaf_dent->mtime = (gint64)time(NULL);
                    seaf_dent->size = 0;
                    fsck_data->repaired_files =
                        g_list_prepend (fsck_data->repaired_files, g_strdup(path));
                } else if (fsck_data->options->check_file_size) {
                    check_file_size (fsck_data, seaf_dent, path);
                }
            }
            g_free (path);
        } else if (S_ISDIR(seaf_dent->mode)) {
            path = g_strdup_printf ("%s%s/", parent_dir, seaf_dent->name);
            if (!path) {
                seaf_warning ("Out of memory, stop to run fsck for repo [%.8s].\n",
                              fsck_data->repo->id);
                goto out;
            }

            if (!fsck_verify_seafobj (store_id, version, seaf_dent->id,
                                      &io_error, VERIFY_DIR,
                                      fsck_data->options->repair)) {
                if (io_error) {
                    g_free (path);
                    goto out;
                }
                if (fsck_data->options->repair) {
                    seaf_message ("Repo[%.8s] dir %s(%.8s) is damaged, recreate an empty dir.\n",
                                  fsck_data->repo->id, path, seaf_dent->id);
                } else {
                    seaf_message ("Repo[%.8s] dir %s(%.8s) is damaged.\n",
                                  fsck_data->repo->id, path, seaf_dent->id);
                }
                is_corrupted = TRUE;
                // dir damaged, set it empty
                memcpy (seaf_dent->id, EMPTY_SHA1, 40);
                fsck_data->repaired_folders =
                    g_list_prepend (fsck_data->repaired_folders, g_strdup(path));
            } else {
                /* Recurse; a changed child id marks this dir corrupted. */
                char *sub_dir_id = fsck_check_dir_recursive (seaf_dent->id, path, fsck_data);
                if (sub_dir_id == NULL) {
                    // IO error
                    g_free (path);
                    goto out;
                }
                if (strcmp (sub_dir_id, seaf_dent->id) != 0) {
                    is_corrupted = TRUE;
                    // dir damaged, set it to new dir_id
                    memcpy (seaf_dent->id, sub_dir_id, 41);
                }
                g_free (sub_dir_id);
            }
            g_free (path);
        }
    }

    if (is_corrupted) {
        new_dir = seaf_dir_new (NULL, dir->entries, version);
        if
(fsck_data->options->repair) {
            if (seaf_dir_save (mgr, store_id, version, new_dir) < 0) {
                seaf_warning ("Repo[%.8s] failed to save dir\n", fsck_data->repo->id);
                seaf_dir_free (new_dir);
                // dir->entries was taken by new_dir, which has been freed.
                dir->entries = NULL;
                goto out;
            }
        }
        /* Report the rebuilt directory's id to the caller. */
        dir_id = g_strdup (new_dir->dir_id);
        seaf_dir_free (new_dir);
        dir->entries = NULL;
    } else {
        dir_id = g_strdup (dir->dir_id);
    }

out:
    seaf_dir_free (dir);
    return dir_id;
}

/* Build the description of the repair commit, listing every repaired
 * file and folder.  Caller frees the returned string. */
static char *
gen_repair_commit_desc (GList *repaired_files, GList *repaired_folders)
{
    GString *desc = g_string_new("Repaired by system.");
    GList *p;
    char *path;

    if (!repaired_files && !repaired_folders)
        return g_string_free (desc, FALSE);

    if (repaired_files) {
        g_string_append (desc, "\nDamaged files:\n");
        for (p = repaired_files; p; p = p->next) {
            path = p->data;
            g_string_append_printf (desc, "%s\n", path);
        }
    }

    if (repaired_folders) {
        g_string_append (desc, "\nDamaged folders:\n");
        for (p = repaired_folders; p; p = p->next) {
            path = p->data;
            g_string_append_printf (desc, "%s\n", path);
        }
    }

    return g_string_free (desc, FALSE);
}

/*
 * Point the repo head at a new "Repaired by system." commit with root
 * @new_root_id and parent @parent.  Sync tokens are deleted first so
 * clients must resync against the repaired history.
 */
static void
reset_commit_to_repair (SeafRepo *repo, SeafCommit *parent, char *new_root_id,
                        GList *repaired_files, GList *repaired_folders)
{
    if (seaf_delete_repo_tokens (repo) < 0) {
        seaf_warning ("Failed to delete repo sync tokens, abort repair.\n");
        return;
    }

    char *desc = gen_repair_commit_desc (repaired_files, repaired_folders);

    SeafCommit *new_commit = NULL;
    new_commit = seaf_commit_new (NULL, repo->id, new_root_id,
                                  parent->creator_name, parent->creator_id,
                                  desc, 0);
    g_free (desc);
    if (!new_commit) {
        seaf_warning ("Out of memory, stop to run fsck for repo %.8s.\n",
                      repo->id);
        return;
    }

    new_commit->parent_id = g_strdup (parent->commit_id);
    seaf_repo_to_commit (repo, new_commit);

    seaf_message ("Update repo %.8s status to commit %.8s.\n",
                  repo->id, new_commit->commit_id);
    seaf_branch_set_commit (repo->head, new_commit->commit_id);
    if (seaf_branch_manager_add_branch (seaf->branch_mgr, repo->head) < 0) {
        seaf_warning ("Update head of repo %.8s to commit %.8s failed, "
                      "recover failed.\n", repo->id, new_commit->commit_id);
    } else {
        /* Persist the commit object only after the branch update succeeded. */
        seaf_commit_manager_add_commit (seaf->commit_mgr, new_commit);
    }
    seaf_commit_unref (new_commit);
}

/*
 * check and recover repo, for damaged file or folder set it empty
 */
static void
check_and_recover_repo (SeafRepo *repo, gboolean reset, FsckOptions *options)
{
    FsckData fsck_data;
    SeafCommit *rep_commit = NULL;
    char *root_id = NULL;

    seaf_message ("Checking file system integrity of repo %s(%.8s)...\n",
                  repo->name, repo->id);

    rep_commit = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id,
                                                 repo->version,
                                                 repo->head->commit_id);
    if (!rep_commit) {
        seaf_warning ("Failed to load commit %s of repo %s\n",
                      repo->head->commit_id, repo->id);
        return;
    }

    memset (&fsck_data, 0, sizeof(fsck_data));
    fsck_data.options = options;
    fsck_data.repo = repo;
    fsck_data.existing_blocks = g_hash_table_new_full (g_str_hash, g_str_equal,
                                                       g_free, NULL);
    if (options->check_file_size) {
        fsck_data.truncate_time =
            seaf_repo_manager_get_repo_truncate_time (seaf->repo_mgr, repo->id);
    }

    root_id = fsck_check_dir_recursive (rep_commit->root_id, "/", &fsck_data);
    g_hash_table_destroy (fsck_data.existing_blocks);
    if (root_id == NULL) {
        /* I/O error during traversal; leave the repo untouched. */
        goto out;
    }

    if (options->repair) {
        if (strcmp (root_id, rep_commit->root_id) != 0) {
            // some fs objects damaged for the head commit,
            // create new head commit using the new root_id
            reset_commit_to_repair (repo, rep_commit, root_id,
                                    fsck_data.repaired_files,
                                    fsck_data.repaired_folders);
        } else if (reset) {
            // for reset commit but fs objects not damaged, also create a repaired commit
            reset_commit_to_repair (repo, rep_commit, rep_commit->root_id,
                                    NULL, NULL);
        }
    }

out:
    g_list_free_full (fsck_data.repaired_files, g_free);
    g_list_free_full (fsck_data.repaired_folders, g_free);
    g_free (root_id);
    seaf_commit_unref (rep_commit);
}

/* GCompareFunc for commit lists: order by ctime, newest first. */
static gint
compare_commit_by_ctime (gconstpointer a, gconstpointer b)
{
    const SeafCommit *commit_a = a;
    const
SeafCommit *commit_b = b;

    /* NOTE(review): gint64 difference is narrowed to gint here — fine for
     * unix timestamps, but an explicit </>/== comparison would be safer. */
    return (commit_b->ctime - commit_a->ctime);
}

/*
 * Object-store iteration callback: load commit object @obj_id and
 * prepend it to the GList pointed to by @commit_list.  Unreadable or
 * unparsable objects are skipped silently.  Always returns TRUE so the
 * iteration continues.
 */
static gboolean
fsck_get_repo_commit (const char *repo_id, int version,
                      const char *obj_id, void *commit_list)
{
    void *data = NULL;
    int data_len;
    GList **cur_list = (GList **)commit_list;

    int ret = seaf_obj_store_read_obj (seaf->commit_mgr->obj_store, repo_id,
                                       version, obj_id, &data, &data_len);
    if (ret < 0 || data == NULL)
        return TRUE;

    SeafCommit *cur_commit = seaf_commit_from_data (obj_id, data, data_len);
    if (cur_commit != NULL) {
        *cur_list = g_list_prepend (*cur_list, cur_commit);
    }

    g_free(data);
    return TRUE;
}

/*
 * The repo's head commit is unusable: scan every commit object in the
 * repo's store, sort newest first, and build a SeafRepo whose head
 * branch points at the newest commit with an intact root fs object.
 * Returns NULL (after logging) if no usable commit exists, or on OOM /
 * I/O error.  The caller owns the returned reference.
 */
static SeafRepo*
get_available_repo (char *repo_id, gboolean repair)
{
    GList *commit_list = NULL;
    GList *temp_list = NULL;
    SeafCommit *temp_commit = NULL;
    SeafBranch *branch = NULL;
    SeafRepo *repo = NULL;
    SeafVirtRepo *vinfo = NULL;
    gboolean io_error;

    seaf_message ("Scanning available commits...\n");

    seaf_obj_store_foreach_obj (seaf->commit_mgr->obj_store, repo_id, 1,
                                fsck_get_repo_commit, &commit_list);

    if (commit_list == NULL) {
        seaf_warning ("No available commits for repo %.8s, can't be repaired.\n",
                      repo_id);
        return NULL;
    }

    /* Newest commit first. */
    commit_list = g_list_sort (commit_list, compare_commit_by_ctime);

    repo = seaf_repo_new (repo_id, NULL, NULL);
    if (repo == NULL) {
        seaf_warning ("Out of memory, stop to run fsck for repo %.8s.\n",
                      repo_id);
        goto out;
    }

    /* Virtual repos read objects from the origin repo's store. */
    vinfo = seaf_repo_manager_get_virtual_repo_info (seaf->repo_mgr, repo_id);
    if (vinfo) {
        repo->is_virtual = TRUE;
        memcpy (repo->store_id, vinfo->origin_repo_id, 36);
        seaf_virtual_repo_info_free (vinfo);
    } else {
        repo->is_virtual = FALSE;
        memcpy (repo->store_id, repo->id, 36);
    }

    for (temp_list = commit_list; temp_list; temp_list = temp_list->next) {
        temp_commit = temp_list->data;
        io_error = FALSE;

        if (!fsck_verify_seafobj (repo->store_id, 1, temp_commit->root_id,
                                  &io_error, VERIFY_DIR, repair)) {
            if (io_error) {
                seaf_repo_unref (repo);
                repo = NULL;
                goto out;
            }
            // fs object of this commit is damaged,
            // continue to verify next
            continue;
        }

        branch =
seaf_branch_new ("master", repo_id, temp_commit->commit_id); if (branch == NULL) { seaf_warning ("Out of memory, stop to run fsck for repo %.8s.\n", repo_id); seaf_repo_unref (repo); repo = NULL; goto out; } repo->head = branch; seaf_repo_from_commit (repo, temp_commit); char time_buf[64]; strftime (time_buf, 64, "%Y-%m-%d %H:%M:%S", localtime((time_t *)&temp_commit->ctime)); seaf_message ("Find available commit %.8s(created at %s) for repo %.8s.\n", temp_commit->commit_id, time_buf, repo_id); break; } out: for (temp_list = commit_list; temp_list; temp_list = temp_list->next) { temp_commit = temp_list->data; seaf_commit_unref (temp_commit); } g_list_free (commit_list); if (!repo || !repo->head) { seaf_warning("No available commits found for repo %.8s, can't be repaired.\n", repo_id); seaf_repo_unref (repo); return NULL; } return repo; } static void repair_repo(char *repo_id, FsckOptions *options) { gboolean exists; gboolean reset = FALSE; SeafRepo *repo; gboolean io_error; seaf_message ("Running fsck for repo %s.\n", repo_id); if (!is_uuid_valid (repo_id)) { seaf_warning ("Invalid repo id %s.\n", repo_id); goto next; } exists = seaf_repo_manager_repo_exists (seaf->repo_mgr, repo_id); if (!exists) { seaf_warning ("Repo %.8s doesn't exist.\n", repo_id); goto next; } repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { seaf_message ("Repo %.8s HEAD commit is damaged, " "need to restore to an old version.\n", repo_id); repo = get_available_repo (repo_id, options->repair); if (!repo) { goto next; } reset = TRUE; } else { SeafCommit *commit = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id, repo->version, repo->head->commit_id); if (!commit) { seaf_warning ("Failed to get head commit %s of repo %s\n", repo->head->commit_id, repo->id); seaf_repo_unref (repo); goto next; } io_error = FALSE; if (!fsck_verify_seafobj (repo->store_id, repo->version, commit->root_id, &io_error, VERIFY_DIR, options->repair)) { if (io_error) { seaf_commit_unref 
(commit);
                seaf_repo_unref (repo);
                goto next;
            } else {
                // root fs object is damaged, get available commit
                seaf_message ("Repo %.8s HEAD commit is damaged, "
                              "need to restore to an old version.\n", repo_id);
                seaf_commit_unref (commit);
                seaf_repo_unref (repo);
                repo = get_available_repo (repo_id, options->repair);
                if (!repo) {
                    goto next;
                }
                reset = TRUE;
            }
        } else {
            // head commit is available
            seaf_commit_unref (commit);
        }
    }

    check_and_recover_repo (repo, reset, options);
    seaf_repo_unref (repo);

next:
    seaf_message ("Fsck finished for repo %.8s.\n\n", repo_id);
}

/* Thread-pool worker: run fsck for one repo, then free the work item.
 * (repo_id and options are borrowed from repair_repos' caller.) */
static void
repair_repo_with_thread_pool(gpointer data, gpointer user_data)
{
    CheckAndRecoverRepoObj *obj = data;

    repair_repo(obj->repo_id, obj->options);
    g_free(obj);
}

/* Run fsck for every repo in @repo_id_list, either sequentially
 * (max_thread_num == 0) or on a pool of max_thread_num worker threads,
 * blocking until all repos are processed. */
static void
repair_repos (GList *repo_id_list, FsckOptions *options)
{
    GList *ptr;
    char *repo_id;
    GThreadPool *pool;

    if (options->max_thread_num) {
        pool = g_thread_pool_new(
            (GFunc)repair_repo_with_thread_pool,
            NULL,
            options->max_thread_num,
            FALSE,
            NULL);
        if (!pool) {
            seaf_warning ("Failed to create check and recover repo thread pool.\n");
            return;
        }
    }

    for (ptr = repo_id_list; ptr; ptr = ptr->next) {
        repo_id = ptr->data;
        if (options->max_thread_num) {
            CheckAndRecoverRepoObj *obj = g_new0(CheckAndRecoverRepoObj, 1);
            obj->repo_id = repo_id;
            obj->options = options;
            g_thread_pool_push(pool, obj, NULL);
        } else {
            repair_repo(repo_id, options);
        }
    }

    if (options->max_thread_num) {
        /* immediate=FALSE, wait=TRUE: drain the queue, then destroy. */
        g_thread_pool_free(pool, FALSE, TRUE);
    }
}

/* Entry point: fsck the given repos, or every repo when @repo_id_list is
 * NULL.  Takes ownership of the list and its string elements. */
int
seaf_fsck (GList *repo_id_list, FsckOptions *options)
{
    if (!repo_id_list)
        repo_id_list = seaf_repo_manager_get_repo_id_list (seaf->repo_mgr);

    repair_repos (repo_id_list, options);

    while (repo_id_list) {
        g_free (repo_id_list->data);
        repo_id_list = g_list_delete_link (repo_id_list, repo_id_list);
    }

    return 0;
}

/* Export files.
*/ /*static gboolean write_enc_block_to_file (const char *repo_id, int version, const char *block_id, SeafileCrypt *crypt, int fd, const char *path) { BlockHandle *handle; BlockMetadata *bmd; char buf[64 * 1024]; int n; int remain; EVP_CIPHER_CTX ctx; char *dec_out; int dec_out_len; gboolean ret = TRUE; bmd = seaf_block_manager_stat_block (seaf->block_mgr, repo_id, version, block_id); if (!bmd) { seaf_warning ("Failed to stat block %s.\n", block_id); return FALSE; } handle = seaf_block_manager_open_block (seaf->block_mgr, repo_id, version, block_id, BLOCK_READ); if (!handle) { seaf_warning ("Failed to open block %s.\n", block_id); g_free (bmd); return FALSE; } if (seafile_decrypt_init (&ctx, crypt->version, crypt->key, crypt->iv) < 0) { seaf_warning ("Failed to init decrypt.\n"); ret = FALSE; goto out; } remain = bmd->size; while (1) { n = seaf_block_manager_read_block (seaf->block_mgr, handle, buf, sizeof(buf)); if (n < 0) { seaf_warning ("Failed to read block %s.\n", block_id); ret = FALSE; break; } else if (n == 0) { break; } remain -= n; dec_out = g_new0 (char, n + 16); if (!dec_out) { seaf_warning ("Failed to alloc memory.\n"); ret = FALSE; break; } if (EVP_DecryptUpdate (&ctx, (unsigned char *)dec_out, &dec_out_len, (unsigned char *)buf, n) == 0) { seaf_warning ("Failed to decrypt block %s .\n", block_id); g_free (dec_out); ret = FALSE; break; } if (writen (fd, dec_out, dec_out_len) != dec_out_len) { seaf_warning ("Failed to write block %s to file %s.\n", block_id, path); g_free (dec_out); ret = FALSE; break; } if (remain == 0) { if (EVP_DecryptFinal_ex (&ctx, (unsigned char *)dec_out, &dec_out_len) == 0) { seaf_warning ("Failed to decrypt block %s .\n", block_id); g_free (dec_out); ret = FALSE; break; } if (dec_out_len > 0) { if (writen (fd, dec_out, dec_out_len) != dec_out_len) { seaf_warning ("Failed to write block %s to file %s.\n", block_id, path); g_free (dec_out); ret = FALSE; break; } } } g_free (dec_out); } EVP_CIPHER_CTX_cleanup (&ctx); out: g_free 
(bmd); seaf_block_manager_close_block (seaf->block_mgr, handle); seaf_block_manager_block_handle_free (seaf->block_mgr, handle); return ret; }*/ static gboolean write_nonenc_block_to_file (const char *repo_id, int version, const char *block_id, const gint64 mtime, int fd, const char *path) { BlockHandle *handle; char buf[64 * 1024]; gboolean ret = TRUE; int n; handle = seaf_block_manager_open_block (seaf->block_mgr, repo_id, version, block_id, BLOCK_READ); if (!handle) { return FALSE; } while (1) { n = seaf_block_manager_read_block (seaf->block_mgr, handle, buf, sizeof(buf)); if (n < 0) { seaf_warning ("Failed to read block %s.\n", block_id); ret = FALSE; break; } else if (n == 0) { break; } if (writen (fd, buf, n) != n) { seaf_warning ("Failed to write block %s to file %s.\n", block_id, path); ret = FALSE; break; } } struct utimbuf timebuf; timebuf.modtime = mtime; timebuf.actime = mtime; if(utime(path, &timebuf) == -1) { seaf_warning ("Current file (%s) lose it\"s mtime.\n", path); } seaf_block_manager_close_block (seaf->block_mgr, handle); seaf_block_manager_block_handle_free (seaf->block_mgr, handle); return ret; } static void create_file (const char *repo_id, const char *file_id, const gint64 mtime, const char *path) { int i; char *block_id; int fd; Seafile *seafile; gboolean ret = TRUE; int version = 1; fd = g_open (path, O_CREAT | O_WRONLY | O_BINARY, 0666); if (fd < 0) { seaf_warning ("Open file %s failed: %s.\n", path, strerror (errno)); return; } seafile = seaf_fs_manager_get_seafile (seaf->fs_mgr, repo_id, version, file_id); if (!seafile) { ret = FALSE; goto out; } for (i = 0; i < seafile->n_blocks; ++i) { block_id = seafile->blk_sha1s[i]; ret = write_nonenc_block_to_file (repo_id, version, block_id, mtime, fd, path); if (!ret) { break; } } out: close (fd); if (!ret) { if (g_unlink (path) < 0) { seaf_warning ("Failed to delete file %s: %s.\n", path, strerror (errno)); } seaf_message ("Failed to export file %s.\n", path); } else { seaf_message ("Export 
file %s.\n", path); } seafile_unref (seafile); } static void export_repo_files_recursive (const char *repo_id, const char *id, const char *parent_dir) { SeafDir *dir; GList *p; SeafDirent *seaf_dent; char *path; SeafFSManager *mgr = seaf->fs_mgr; int version = 1; dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, id); if (!dir) { return; } for (p = dir->entries; p; p = p->next) { seaf_dent = p->data; path = g_build_filename (parent_dir, seaf_dent->name, NULL); if (S_ISREG(seaf_dent->mode)) { // create file create_file (repo_id, seaf_dent->id, seaf_dent->mtime, path); } else if (S_ISDIR(seaf_dent->mode)) { if (g_mkdir (path, 0777) < 0) { seaf_warning ("Failed to mkdir %s: %s.\n", path, strerror (errno)); g_free (path); continue; } else { seaf_message ("Export dir %s.\n", path); } export_repo_files_recursive (repo_id, seaf_dent->id, path); } g_free (path); } seaf_dir_free (dir); } static SeafCommit* get_available_commit (const char *repo_id) { GList *commit_list = NULL; GList *temp_list = NULL; GList *next_list = NULL; SeafCommit *temp_commit = NULL; gboolean io_error; seaf_message ("Scanning available commits for repo %s...\n", repo_id); seaf_obj_store_foreach_obj (seaf->commit_mgr->obj_store, repo_id, 1, fsck_get_repo_commit, &commit_list); if (commit_list == NULL) { seaf_warning ("No available commits for repo %.8s, export failed.\n\n", repo_id); return NULL; } commit_list = g_list_sort (commit_list, compare_commit_by_ctime); temp_list = commit_list; while (temp_list) { next_list = temp_list->next; temp_commit = temp_list->data; io_error = FALSE; if (memcmp (temp_commit->root_id, EMPTY_SHA1, 40) == 0) { seaf_commit_unref (temp_commit); temp_commit = NULL; temp_list = next_list; continue; } else if (!fsck_verify_seafobj (repo_id, 1, temp_commit->root_id, &io_error, VERIFY_DIR, FALSE)) { seaf_commit_unref (temp_commit); temp_commit = NULL; temp_list = next_list; if (io_error) { break; } // fs object of this commit is damaged, // continue to verify next 
continue; } char time_buf[64]; strftime (time_buf, 64, "%Y-%m-%d %H:%M:%S", localtime((time_t *)&temp_commit->ctime)); seaf_message ("Find available commit %.8s(created at %s), will export files from it.\n", temp_commit->commit_id, time_buf); temp_list = next_list; break; } while (temp_list) { seaf_commit_unref (temp_list->data); temp_list = temp_list->next; } g_list_free (commit_list); if (!temp_commit && !io_error) { seaf_warning ("No available commits for repo %.8s, export failed.\n\n", repo_id); } return temp_commit; } void export_repo_files (const char *repo_id, const char *init_path, GHashTable *enc_repos) { SeafCommit *commit = get_available_commit (repo_id); if (!commit) { return; } if (commit->encrypted) { g_hash_table_insert (enc_repos, g_strdup (repo_id), g_strdup (commit->repo_name)); seaf_commit_unref (commit); return; } seaf_message ("Start to export files for repo %.8s(%s).\n", repo_id, commit->repo_name); char *dir_name = g_strdup_printf ("%.8s_%s_%s", repo_id, commit->repo_name, commit->creator_name); char * export_path = g_build_filename (init_path, dir_name, NULL); g_free (dir_name); if (g_mkdir (export_path, 0777) < 0) { seaf_warning ("Failed to create export dir %s: %s, export failed.\n", export_path, strerror (errno)); g_free (export_path); seaf_commit_unref (commit); return; } export_repo_files_recursive (repo_id, commit->root_id, export_path); seaf_message ("Finish exporting files for repo %.8s.\n\n", repo_id); g_free (export_path); seaf_commit_unref (commit); } static GList * get_repo_ids (const char *seafile_dir) { GList *repo_ids = NULL; char *commit_path = g_build_filename (seafile_dir, "storage", "commits", NULL); GError *error = NULL; GDir *dir = g_dir_open (commit_path, 0, &error); if (!dir) { seaf_warning ("Open dir %s failed: %s.\n", commit_path, error->message); g_clear_error (&error); g_free (commit_path); return NULL; } const char *file_name; while ((file_name = g_dir_read_name (dir)) != NULL) { repo_ids = g_list_prepend 
(repo_ids, g_strdup (file_name));
    }

    g_dir_close (dir);
    g_free (commit_path);

    return repo_ids;
}

/* GHFunc: print one "repo_id(repo_name)" line for a skipped encrypted repo. */
static void
print_enc_repo (gpointer key, gpointer value, gpointer user_data)
{
    seaf_message ("%s(%s)\n", (char *)key, (char *)value);
}

/*
 * Export the files of the given repos (or of every repo found under
 * @seafile_dir when @repo_id_list is NULL) into @export_path, creating
 * the directory if needed.  Encrypted repos are skipped and listed at
 * the end instead.
 *
 * Takes ownership of @repo_id_list (elements and links freed) and of
 * @export_path (freed before returning).
 */
void
export_file (GList *repo_id_list, const char *seafile_dir, char *export_path)
{
    struct stat dir_st;

    /* Ensure export_path exists and is a directory. */
    if (stat (export_path, &dir_st) < 0) {
        if (errno == ENOENT) {
            if (g_mkdir (export_path, 0777) < 0) {
                seaf_warning ("Mkdir %s failed: %s.\n",
                              export_path, strerror (errno));
                return;
            }
        } else {
            seaf_warning ("Stat path: %s failed: %s.\n",
                          export_path, strerror (errno));
            return;
        }
    } else {
        if (!S_ISDIR(dir_st.st_mode)) {
            seaf_warning ("%s already exist, but it is not a directory.\n",
                          export_path);
            return;
        }
    }

    if (!repo_id_list) {
        repo_id_list = get_repo_ids (seafile_dir);
        if (!repo_id_list)
            return;
    }

    GList *iter = repo_id_list;
    char *repo_id;
    /* repo_id -> repo_name for encrypted repos that were not exported. */
    GHashTable *enc_repos = g_hash_table_new_full (g_str_hash, g_str_equal,
                                                   g_free, g_free);
    for (; iter; iter=iter->next) {
        repo_id = iter->data;
        if (!is_uuid_valid (repo_id)) {
            seaf_warning ("Invalid repo id: %s.\n", repo_id);
            continue;
        }
        export_repo_files (repo_id, export_path, enc_repos);
    }

    if (g_hash_table_size (enc_repos) > 0) {
        seaf_message ("The following repos are encrypted and are not exported:\n");
        g_hash_table_foreach (enc_repos, print_enc_repo, NULL);
    }

    while (repo_id_list) {
        g_free (repo_id_list->data);
        repo_id_list = g_list_delete_link (repo_id_list, repo_id_list);
    }
    g_hash_table_destroy (enc_repos);
    g_free (export_path);
}

================================================ FILE: server/gc/fsck.h ================================================
#ifndef SEAF_FSCK_H
#define SEAF_FSCK_H

/* Options controlling a fsck run. */
typedef struct FsckOptions {
    int max_thread_num;         /* 0 = process repos sequentially */
    gboolean check_integrity;   /* also verify each block's content */
    gboolean check_file_size;   /* compare dirent sizes with file objects */
    gboolean repair;            /* rewrite damaged objects / reset head */
} FsckOptions;

int
seaf_fsck (GList *repo_id_list, FsckOptions *options);

void
export_file (GList *repo_id_list, const char *seafile_dir, char *export_path);

#endif
================================================ FILE: server/gc/gc-core.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include "common.h" #include "seafile-session.h" #include "bloom-filter.h" #include "gc-core.h" #include "utils.h" #define DEBUG_FLAG SEAFILE_DEBUG_OTHER #include "log.h" #include #define MAX_BF_SIZE (((size_t)1) << 29) /* 64 MB */ #define KEEP_ALIVE_PER_OBJS 100 #define KEEP_ALIVE_PER_SECOND 1 /* * The number of bits in the bloom filter is 4 times the number of all blocks. * Let m be the bits in the bf, n be the number of blocks to be added to the bf * (the number of live blocks), and k = 3 (closed to optimal for m/n = 4), * the probability of false-positive is * * p = (1 - e^(-kn/m))^k = 0.15 * * Because m = 4 * total_blocks >= 4 * (live blocks) = 4n, we should have p <= 0.15. * Put it another way, we'll clean up at least 85% dead blocks in each gc operation. * See http://en.wikipedia.org/wiki/Bloom_filter. * * Supose we have 8TB space, and the avg block size is 1MB, we'll have 8M blocks, then * the size of bf is (8M * 4)/8 = 4MB. * * If total_blocks is a small number (e.g. < 100), we should try to clean all dead blocks. * So we set the minimal size of the bf to 1KB. */ /* * Online GC algorithm * * There is a table `GCID` in the seafile database. Every time GC is run for a repo, * a new GC ID (UUID) will be generated and inserted into this table. * * Other threads that want to update the branch head of a repo must do so as follows: * 1. Read the GC ID from the table before wrting blocks; * 2. begin a transaction; * 3. Read the GC ID again with `SELECT ... FOR UPDATE`; * 4. Compare the new GC ID with the previous one. If they are the same, proceed to * update the branch head; otherwise, a GC operation has been run between * steps 1 and 3, the branch update operation must be failed. * 5. Commit or rollback the transaction. 
* For syncing clients, the algorithm is a bit more complicated.
 * Because writing blocks and updating the branch head is not executed in the same
 * context (or more precisely, not in the same thread), the GC ID read in step 1
 * has to be stored into a database table `LastGCID (client_token, gc_id)`.
 * After step 4, no matter the branch update succeeds or not, the entry in `LastGCID`
 * table has to be deleted.
 */

/* Allocate the bloom filter used as the live-block index: 4 bits per
 * block (see the sizing analysis above), clamped between 2^13 bits
 * (1 KB) and MAX_BF_SIZE, with 3 hash functions.  The log message prints
 * the size in bytes (size >> 3). */
static Bloom *
alloc_gc_index (const char *repo_id, guint64 total_blocks)
{
    size_t size;

    size = (size_t) MAX(total_blocks << 2, 1 << 13);
    size = MIN (size, MAX_BF_SIZE);

    seaf_message ("GC index size is %u Byte for repo %.8s.\n",
                  (int)size >> 3, repo_id);

    return bloom_create (size, 3, 0);
}

/* Per-repo GC traversal state. */
typedef struct {
    SeafRepo *repo;
    Bloom *blocks_index;        /* live block ids */
    Bloom *fs_index;            /* live fs object ids (may be NULL) */
    GHashTable *visited;        /* fs objects already traversed (may be NULL) */
    GHashTable *visited_commits;

    /* > 0: keep a period of history;
     * == 0: only keep data in head commit;
     * < 0: keep all history data.
     */
    gint64 truncate_time;
    gboolean traversed_head;
    int traversed_commits;
    gint64 traversed_blocks;
    int verbose;
    gint64 traversed_fs_objs;
    SeafDBTrans *trans;             /* open transaction; pinged to keep it alive */
    gint64 keep_alive_last_time;
    gint64 keep_alive_obj_counter;
    gboolean traverse_base_commit;  /* base commit: index fs objects only */
} GCData;

/* Add every block of file object @file_id to the live-block bloom
 * filter.  Returns -1 if the file object can't be loaded, 0 otherwise. */
static int
add_blocks_to_index (SeafFSManager *mgr, GCData *data, const char *file_id)
{
    SeafRepo *repo = data->repo;
    Bloom *blocks_index = data->blocks_index;
    Seafile *seafile;
    int i;

    seafile = seaf_fs_manager_get_seafile (mgr, repo->store_id, repo->version, file_id);
    if (!seafile) {
        seaf_warning ("Failed to find file %s:%s.\n", repo->store_id, file_id);
        return -1;
    }

    for (i = 0; i < seafile->n_blocks; ++i) {
        bloom_add (blocks_index, seafile->blk_sha1s[i]);
        ++data->traversed_blocks;
    }

    seafile_unref (seafile);

    return 0;
}

/* Record fs object @file_id as live (when an fs index is in use) and
 * bump the traversal counter. */
static void
add_fs_to_index(GCData *data, const char *file_id)
{
    Bloom *fs_index = data->fs_index;

    if (fs_index) {
        bloom_add (fs_index, file_id);
    }

    ++(data->traversed_fs_objs);
}

static gboolean
fs_callback (SeafFSManager *mgr, const char *store_id, int version, const char
*obj_id, int type, void *user_data, gboolean *stop) { GCData *data = user_data; if (data->visited != NULL) { if (g_hash_table_lookup (data->visited, obj_id) != NULL) { *stop = TRUE; return TRUE; } char *key = g_strdup(obj_id); g_hash_table_replace (data->visited, key, key); } if (data->trans) { ++(data->keep_alive_obj_counter); if (data->keep_alive_obj_counter >= KEEP_ALIVE_PER_OBJS && ((gint64)time(NULL) - data->keep_alive_last_time) >= KEEP_ALIVE_PER_SECOND) { data->keep_alive_last_time = (gint64)time(NULL); data->keep_alive_obj_counter = 0; seaf_db_trans_query(data->trans, "SELECT 1;", 0); } } add_fs_to_index(data, obj_id); // If traversing the base_commit, only the fs objects need to be retained, while the block does not. // This is because only the fs objects are needed when merging virtual repo. if (data->traverse_base_commit) { return TRUE; } if (type == SEAF_METADATA_TYPE_FILE && add_blocks_to_index (mgr, data, obj_id) < 0) return FALSE; return TRUE; } static gboolean traverse_commit (SeafCommit *commit, void *vdata, gboolean *stop) { GCData *data = vdata; int ret; if (g_hash_table_lookup (data->visited_commits, commit->commit_id)) { // Has traversed on prev head commit, stop traverse from this branch *stop = TRUE; return TRUE; } if (data->truncate_time == 0) { *stop = TRUE; /* Stop after traversing the head commit. */ } else if (data->truncate_time > 0 && (gint64)(commit->ctime) < data->truncate_time && data->traversed_head) { /* Still traverse the first commit older than truncate_time. * If a file in the child commit of this commit is deleted, * we need to access this commit in order to restore it * from trash. 
         */
        *stop = TRUE;
    }

    if (!data->traversed_head)
        data->traversed_head = TRUE;

    if (data->verbose)
        seaf_message ("Traversing commit %.8s for repo %.8s.\n",
                      commit->commit_id, data->repo->id);

    ++data->traversed_commits;

    data->traversed_fs_objs = 0;

    ret = seaf_fs_manager_traverse_tree (seaf->fs_mgr,
                                         data->repo->store_id,
                                         data->repo->version,
                                         commit->root_id,
                                         fs_callback,
                                         data, FALSE);
    if (ret < 0)
        return FALSE;

    /* Only the key matters; the value is never dereferenced.
     * NOTE(review): &dummy dangles once this function returns — harmless
     * as used, but never read values out of visited_commits. */
    int dummy;
    g_hash_table_replace (data->visited_commits, g_strdup (commit->commit_id), &dummy);

    if (data->verbose)
        seaf_message ("Traversed %"G_GINT64_FORMAT" fs objects for repo %.8s.\n",
                      data->traversed_fs_objs, data->repo->id);

    return TRUE;
}

/* Generate a fresh GC ID (UUID) for @repo and store it in the GCID table
 * inside @trans.  Writers compare this ID before/after updating a branch
 * head to detect a concurrent GC run (see "Online GC algorithm" at the
 * top of this file).  Returns the insert/update query result (< 0 on error). */
static int
update_gc_id (SeafRepo *repo, SeafDBTrans *trans)
{
    char *sql;
    char *gc_id;
    gboolean id_exists, db_err = FALSE;
    int ret;

    sql = "SELECT 1 FROM GCID WHERE repo_id = ?";
    id_exists = seaf_db_trans_check_for_existence (trans, sql, &db_err,
                                                   1, "string", repo->id);

    gc_id = gen_uuid ();
    if (id_exists) {
        sql = "UPDATE GCID SET gc_id = ? WHERE repo_id = ?";
        ret = seaf_db_trans_query (trans, sql, 2,
                                   "string", gc_id, "string", repo->id);
    } else {
        sql = "INSERT INTO GCID (repo_id, gc_id) VALUES (?, ?)";
        ret = seaf_db_trans_query (trans, sql, 2,
                                   "string", repo->id, "string", gc_id);
    }
    g_free (gc_id);

    return ret;
}

/* Advance the repo's "valid since" timestamp to reflect the history that
 * survives this GC run.  @new_time is the truncate time: > 0 keeps a
 * period of history, == 0 keeps only the head commit, < 0 keeps all. */
static void
update_valid_since_time (SeafRepo *repo, gint64 new_time)
{
    gint64 old_time = seaf_repo_manager_get_repo_valid_since (repo->manager,
                                                              repo->id);

    if (new_time > 0) {
        if (new_time > old_time)
            seaf_repo_manager_set_repo_valid_since (repo->manager,
                                                    repo->id,
                                                    new_time);
    } else if (new_time == 0) {
        /* Only the head commit is valid after GC if no history is kept.
         */
        SeafCommit *head = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                                           repo->id, repo->version,
                                                           repo->head->commit_id);
        if (head && (old_time < 0 || head->ctime > (guint64)old_time))
            seaf_repo_manager_set_repo_valid_since (repo->manager,
                                                    repo->id,
                                                    head->ctime);
        seaf_commit_unref (head);
    }
}

/* Build the traversal state for one repo; takes a ref on @repo.
 * Also updates the repo's valid-since time from its truncate time. */
static GCData *
gc_data_new (SeafRepo *repo, Bloom *blocks_index, Bloom *fs_index, int verbose)
{
    GCData *data;

    data = g_new0(GCData, 1);
    seaf_repo_ref(repo);
    data->repo = repo;
    data->blocks_index = blocks_index;
    data->fs_index = fs_index;
    data->visited = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);
    data->visited_commits = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);
    data->verbose = verbose;

    gint64 truncate_time;
    truncate_time = seaf_repo_manager_get_repo_truncate_time (repo->manager, repo->id);
    update_valid_since_time (repo, truncate_time);
    data->truncate_time = truncate_time;

    data->keep_alive_last_time = (gint64)time(NULL);
    data->keep_alive_obj_counter = 0;

    return data;
}

/* Release traversal state; NULL-safe.  Drops the ref on data->repo. */
static void
gc_data_free (GCData *data)
{
    if (!data)
        return;
    seaf_repo_unref(data->repo);
    g_hash_table_destroy (data->visited);
    g_hash_table_destroy (data->visited_commits);
    g_free (data);
    return;
}

/* Re-scan commits that were added to "master" after the initial scan
 * (while holding @trans for online GC), so concurrently-written data is
 * not collected.  Returns newly traversed block count, or -1 on error. */
static gint64
populate_gc_index_for_repo_for_new_commits (GCData *data, SeafDBTrans *trans)
{
    SeafBranch *new_branch = NULL;
    gint64 n_blocks_last = 0;
    int n_commits_last = 0;
    gboolean res;
    gint64 ret = 0;
    SeafRepo *repo = data->repo;

    if (!repo->is_virtual) {
        if (trans != NULL && update_gc_id (repo, trans) < 0) {
            seaf_warning ("Failed to update GCID for repo %s.\n", repo->id);
            ret = -1;
            goto out;
        }
    }

    n_blocks_last = data->traversed_blocks;
    n_commits_last = data->traversed_commits;
    data->traversed_blocks = 0;
    data->traversed_commits = 0;
    data->trans = trans;

    new_branch = seaf_branch_manager_get_branch (seaf->branch_mgr, repo->id, "master");
    if (!new_branch) {
        seaf_warning ("Failed to get master branch of repo %.8s.\n", repo->id);
        ret = -1;
        goto out;
    }

    if (g_strcmp0
(repo->head->commit_id, new_branch->commit_id) != 0) { res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr, repo->id, repo->version, new_branch->commit_id, traverse_commit, data, FALSE); if (!res) { ret = -1; seaf_warning ("Failed to populate index for repo %.8s.\n", repo->id); goto out; } } seaf_message ("Traversed %d commits, %"G_GINT64_FORMAT" blocks for repo %.8s.\n", data->traversed_commits + n_commits_last, data->traversed_blocks + n_blocks_last, repo->id); ret = data->traversed_blocks; out: seaf_branch_unref (new_branch); return ret; } static gint64 populate_gc_index_for_repo (GCData *data, SeafDBTrans *trans) { gboolean res; gint64 ret = 0; SeafRepo *repo = data->repo; data->trans = trans; if (!repo->is_virtual) seaf_message ("Populating index for repo %.8s.\n", repo->id); else seaf_message ("Populating index for sub-repo %.8s.\n", repo->id); res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr, repo->id, repo->version, repo->head->commit_id, traverse_commit, data, FALSE); if (!res) { ret = -1; seaf_warning ("Failed to populate index for repo %.8s.\n", repo->id); return -1; } // Traverse the base commit of the virtual repo. Otherwise, if the virtual repo has not been updated for a long time, // the fs object corresponding to the base commit will be removed by mistake. 
if (!repo->is_virtual) { GList *vrepo_ids = NULL, *ptr; char *repo_id = NULL; SeafVirtRepo *vinfo = NULL; vrepo_ids = seaf_repo_manager_get_virtual_repo_ids_by_origin (seaf->repo_mgr, repo->id); for (ptr = vrepo_ids; ptr; ptr = ptr->next) { repo_id = ptr->data; vinfo = seaf_repo_manager_get_virtual_repo_info (seaf->repo_mgr, repo_id); if (!vinfo) { continue; } data->traverse_base_commit = TRUE; res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr, repo->store_id, repo->version, vinfo->base_commit, traverse_commit, data, FALSE); data->traverse_base_commit = FALSE; seaf_virtual_repo_info_free (vinfo); if (!res) { seaf_warning ("Failed to traverse base commit %s for virtual repo %s.\n", vinfo->base_commit, repo_id); string_list_free (vrepo_ids); return -1; } } string_list_free (vrepo_ids); } ret = data->traversed_blocks; return ret; } #define MAX_THREADS 10 typedef struct CheckBlockParam { char *store_id; int repo_version; Bloom *index; int dry_run; GAsyncQueue *async_queue; pthread_mutex_t counter_lock; gint64 removed_blocks; } CheckBlockParam; typedef struct CheckFSParam { char *store_id; int repo_version; Bloom *index; int dry_run; GAsyncQueue *async_queue; pthread_mutex_t counter_lock; gint64 removed_fs; } CheckFSParam; static void check_block_liveness (gpointer data, gpointer user_data) { char *block_id = data; CheckBlockParam *param = user_data; if (!bloom_test (param->index, block_id)) { pthread_mutex_lock (¶m->counter_lock); param->removed_blocks ++; pthread_mutex_unlock (¶m->counter_lock); if (!param->dry_run) seaf_block_manager_remove_block (seaf->block_mgr, param->store_id, param->repo_version, block_id); } g_async_queue_push (param->async_queue, block_id); } static gint64 check_existing_blocks (char *store_id, int repo_version, GHashTable *exist_blocks, Bloom *blocks_index, int dry_run) { char *block_id; GThreadPool *tpool = NULL; GAsyncQueue *async_queue = NULL; CheckBlockParam *param = NULL; GHashTableIter iter; gpointer key, value; gint64 
ret = 0; async_queue = g_async_queue_new (); param = g_new0 (CheckBlockParam, 1); param->store_id = store_id; param->repo_version = repo_version; param->index = blocks_index; param->dry_run = dry_run; param->async_queue = async_queue; pthread_mutex_init (¶m->counter_lock, NULL); tpool = g_thread_pool_new (check_block_liveness, param, MAX_THREADS, FALSE, NULL); if (!tpool) { seaf_warning ("Failed to create thread pool for repo %s, stop gc.\n", store_id); ret = -1; goto out; } g_hash_table_iter_init (&iter, exist_blocks); while (g_hash_table_iter_next (&iter, &key, &value)) { g_thread_pool_push (tpool, (char *)key, NULL); } while ((block_id = g_async_queue_pop (async_queue))) { g_hash_table_remove (exist_blocks, block_id); if (g_hash_table_size (exist_blocks) == 0) { break; } } ret = param->removed_blocks; out: g_thread_pool_free (tpool, TRUE, TRUE); g_async_queue_unref (async_queue); g_free (param); return ret; } static gboolean collect_exist_blocks (const char *store_id, int version, const char *block_id, void *vdata) { GHashTable *exist_blocks = vdata; int dummy; g_hash_table_replace (exist_blocks, g_strdup (block_id), &dummy); return TRUE; } static void check_fs_liveness (gpointer data, gpointer user_data) { char *fs_id = data; CheckFSParam *param = user_data; if (!bloom_test (param->index, fs_id)) { pthread_mutex_lock (¶m->counter_lock); param->removed_fs ++; pthread_mutex_unlock (¶m->counter_lock); if (!param->dry_run) seaf_fs_manager_delete_object(seaf->fs_mgr, param->store_id, param->repo_version, fs_id); } g_async_queue_push (param->async_queue, fs_id); } static gint64 check_existing_fs (char *store_id, int repo_version, GHashTable *exist_fs, Bloom *fs_index, int dry_run) { char *fs_id; GThreadPool *tpool = NULL; GAsyncQueue *async_queue = NULL; CheckFSParam *param = NULL; GHashTableIter iter; gpointer key, value; gint64 ret = 0; async_queue = g_async_queue_new (); param = g_new0 (CheckFSParam, 1); param->store_id = store_id; param->repo_version = 
repo_version; param->index = fs_index; param->dry_run = dry_run; param->async_queue = async_queue; pthread_mutex_init (¶m->counter_lock, NULL); tpool = g_thread_pool_new (check_fs_liveness, param, MAX_THREADS, FALSE, NULL); if (!tpool) { seaf_warning ("Failed to create thread pool for repo %s, stop gc.\n", store_id); ret = -1; goto out; } g_hash_table_iter_init (&iter, exist_fs); while (g_hash_table_iter_next (&iter, &key, &value)) { g_thread_pool_push (tpool, (char *)key, NULL); } while ((fs_id = g_async_queue_pop (async_queue))) { g_hash_table_remove (exist_fs, fs_id); if (g_hash_table_size (exist_fs) == 0) { break; } } ret = param->removed_fs; out: g_thread_pool_free (tpool, TRUE, TRUE); g_async_queue_unref (async_queue); g_free (param); return ret; } static gboolean collect_exist_fs (const char *store_id, int version, const char *fs_id, void *vdata) { GHashTable *exist_fs = vdata; int dummy; g_hash_table_replace (exist_fs, g_strdup (fs_id), &dummy); return TRUE; } static gint64 populate_gc_index_for_virtual_repos_for_new_commits (GList *virtual_repos, SeafDBTrans *trans) { GList *ptr; SeafRepo *vrepo; gint64 scan_ret = 0; gint64 ret = 0; GCData *data = NULL; for (ptr = virtual_repos; ptr; ptr = ptr->next) { data = ptr->data; if (!data) continue; vrepo = data->repo; if (!vrepo) { continue; } scan_ret = populate_gc_index_for_repo_for_new_commits (data, trans); if (scan_ret < 0) { ret = -1; goto out; } ret += scan_ret; } out: return ret; } static gint64 populate_gc_index_for_virtual_repos (SeafRepo *repo, GList **virtual_repos, Bloom *blocks_index, Bloom *fs_index, SeafDBTrans *trans, int verbose) { GList *vrepo_ids = NULL, *ptr; char *repo_id; SeafRepo *vrepo; gint64 scan_ret = 0; gint64 ret = 0; GCData *data; vrepo_ids = seaf_repo_manager_get_virtual_repo_ids_by_origin (seaf->repo_mgr, repo->id); for (ptr = vrepo_ids; ptr; ptr = ptr->next) { repo_id = ptr->data; vrepo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!vrepo) { seaf_warning ("Failed to 
get repo %s.\n", repo_id); ret = -1; goto out; } data = gc_data_new (vrepo, blocks_index, fs_index, verbose); *virtual_repos = g_list_prepend (*virtual_repos, data); scan_ret = populate_gc_index_for_repo (data, trans); seaf_repo_unref(vrepo); if (scan_ret < 0) { ret = -1; goto out; } ret += scan_ret; } out: string_list_free (vrepo_ids); return ret; } /* * @keep_days: explicitly sepecify how many days of history to keep after GC. * This has higher priority than the history limit set in database. * @online: is running online GC. Online GC is not supported for SQLite DB. */ gint64 gc_v1_repo (SeafRepo *repo, int dry_run, int online, int verbose, int rm_fs) { Bloom *blocks_index = NULL; Bloom *fs_index = NULL; GHashTable *exist_blocks = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); GHashTable *exist_fs = NULL; GList *virtual_repos = NULL; guint64 total_blocks = 0; guint64 total_fs = 0; guint64 reachable_blocks = 0; gint64 removed_fs = 0; gint64 ret; GCData *data; SeafDBTrans *trans = NULL; ret = seaf_block_manager_foreach_block (seaf->block_mgr, repo->store_id, repo->version, collect_exist_blocks, exist_blocks); if (ret < 0) { seaf_warning ("Failed to collect existing blocks for repo %.8s, stop GC.\n\n", repo->id); g_hash_table_destroy (exist_blocks); return ret; } total_blocks = g_hash_table_size (exist_blocks); if (total_blocks == 0) { seaf_message ("No blocks for repo %.8s, skip GC.\n\n", repo->id); g_hash_table_destroy (exist_blocks); return 0; } if (rm_fs) { exist_fs = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); ret = seaf_obj_store_foreach_obj (seaf->fs_mgr->obj_store, repo->store_id, repo->version, collect_exist_fs, exist_fs); if (ret < 0) { seaf_warning ("Failed to collect existing fs for repo %.8s, stop GC.\n\n", repo->id); goto out; } total_fs = g_hash_table_size (exist_fs); } if (rm_fs) seaf_message ("GC started for repo %.8s. 
Total block number is %"G_GUINT64_FORMAT", total fs number is %"G_GUINT64_FORMAT".\n", repo->id, total_blocks, total_fs); else seaf_message ("GC started for repo %.8s. Total block number is %"G_GUINT64_FORMAT".\n", repo->id, total_blocks); /* * Store the index of live blocks in bloom filter to save memory. * Since bloom filters only have false-positive, we * may skip some garbage blocks, but we won't delete * blocks that are still alive. */ blocks_index = alloc_gc_index (repo->id, total_blocks); if (!blocks_index) { seaf_warning ("GC: Failed to allocate blocks index for repo %.8s, stop gc.\n", repo->id); ret = -1; goto out; } if (rm_fs && total_fs > 0) { fs_index = alloc_gc_index (repo->id, total_fs); if (!fs_index) { seaf_warning ("GC: Failed to allocate fs index for repo %.8s, stop gc.\n", repo->id); ret = -1; goto out; } } data = gc_data_new (repo, blocks_index, fs_index, verbose); ret = populate_gc_index_for_repo (data, trans); if (ret < 0) { goto out; } reachable_blocks += ret; /* Since virtual repos share fs and block store with the origin repo, * it's necessary to do GC for them together. 
*/ ret = populate_gc_index_for_virtual_repos (repo, &virtual_repos, blocks_index, fs_index, trans, verbose); if (ret < 0) { goto out; } reachable_blocks += ret; if (online) { trans = seaf_db_begin_transaction (seaf->db); if (!trans) goto out; } ret = populate_gc_index_for_repo_for_new_commits (data, trans); if (ret < 0) { if (online) { seaf_db_rollback (trans); seaf_db_trans_close (trans); } goto out; } reachable_blocks += ret; ret = populate_gc_index_for_virtual_repos_for_new_commits (virtual_repos, trans); if (ret < 0) { if (online) { seaf_db_rollback (trans); seaf_db_trans_close (trans); } goto out; } reachable_blocks += ret; if (!dry_run) seaf_message ("Scanning and deleting unused blocks for repo %.8s.\n", repo->id); else seaf_message ("Scanning unused blocks for repo %.8s.\n", repo->id); ret = check_existing_blocks (repo->store_id, repo->version, exist_blocks, blocks_index, dry_run); if (ret < 0) { if (online) { seaf_db_rollback (trans); seaf_db_trans_close (trans); } goto out; } if (rm_fs && total_fs > 0) { removed_fs = check_existing_fs(repo->store_id, repo->version, exist_fs, fs_index, dry_run); if (removed_fs < 0) { if (online) { seaf_db_rollback (trans); seaf_db_trans_close (trans); } goto out; } } if (!dry_run) { if (rm_fs) seaf_message ("GC finished for repo %.8s. %"G_GUINT64_FORMAT" blocks total, " "about %"G_GUINT64_FORMAT" reachable blocks, " "%"G_GUINT64_FORMAT" blocks are removed. " "%"G_GUINT64_FORMAT" fs are removed.\n", repo->id, total_blocks, reachable_blocks, ret, removed_fs); else seaf_message ("GC finished for repo %.8s. %"G_GUINT64_FORMAT" blocks total, " "about %"G_GUINT64_FORMAT" reachable blocks, " "%"G_GUINT64_FORMAT" blocks are removed.\n", repo->id, total_blocks, reachable_blocks, ret); } else { if (rm_fs) seaf_message ("GC finished for repo %.8s. %"G_GUINT64_FORMAT" blocks total, " "about %"G_GUINT64_FORMAT" reachable blocks, " "%"G_GUINT64_FORMAT" blocks can be removed. 
" "%"G_GUINT64_FORMAT" fs can be removed.\n", repo->id, total_blocks, reachable_blocks, ret, removed_fs); else seaf_message ("GC finished for repo %.8s. %"G_GUINT64_FORMAT" blocks total, " "about %"G_GUINT64_FORMAT" reachable blocks, " "%"G_GUINT64_FORMAT" blocks can be removed.\n", repo->id, total_blocks, reachable_blocks, ret); } if (online) { if (seaf_db_commit (trans) < 0) { seaf_db_rollback (trans); } seaf_db_trans_close (trans); } out: printf ("\n"); if (blocks_index) bloom_destroy (blocks_index); if (fs_index) bloom_destroy(fs_index); g_hash_table_destroy (exist_blocks); if (exist_fs) g_hash_table_destroy (exist_fs); gc_data_free (data); g_list_free_full(virtual_repos, (GDestroyNotify)gc_data_free); return ret; } typedef enum RemoveType { COMMIT, FS, BLOCK } RemoveType; typedef struct RemoveTask { const char *repo_id; RemoveType remove_type; gboolean success; } RemoveTask; static void remove_store (gpointer data, gpointer user_data) { RemoveTask *task = data; GAsyncQueue *async_queue = user_data; int ret = 0; switch (task->remove_type) { case COMMIT: seaf_message ("Deleting commits for repo %s.\n", task->repo_id); ret = seaf_commit_manager_remove_store (seaf->commit_mgr, task->repo_id); if (ret == 0) { task->success = TRUE; } break; case FS: seaf_message ("Deleting fs objects for repo %s.\n", task->repo_id); ret = seaf_fs_manager_remove_store (seaf->fs_mgr, task->repo_id); if (ret == 0) { task->success = TRUE; } break; case BLOCK: seaf_message ("Deleting blocks for repo %s.\n", task->repo_id); ret = seaf_block_manager_remove_store (seaf->block_mgr, task->repo_id); if (ret == 0) { task->success = TRUE; } break; default: break; } g_async_queue_push (async_queue, task); } void delete_garbaged_repos (int dry_run, int thread_num) { GList *del_repos = NULL; GList *ptr; GAsyncQueue *async_queue = NULL; int tnum; GThreadPool *tpool = NULL; RemoveTask *task = NULL; int n_tasks = 0; char *repo_id; char *dup_id; GHashTableIter iter; gpointer key, value; GHashTable 
*deleted; deleted = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); seaf_message ("=== Repos deleted by users ===\n"); del_repos = seaf_repo_manager_list_garbage_repos (seaf->repo_mgr); if (!dry_run && del_repos) { async_queue = g_async_queue_new (); if (!async_queue) { seaf_warning ("Failed to create async queue.\n"); goto out; } tnum = thread_num <= 0 ? MAX_THREADS : thread_num; tpool = g_thread_pool_new (remove_store, async_queue, tnum, FALSE, NULL); if (!tpool) { seaf_warning ("Failed to create thread pool.\n"); goto out; } } for (ptr = del_repos; ptr; ptr = ptr->next) { repo_id = ptr->data; if (!is_uuid_valid(repo_id)) { continue; } /* Confirm repo doesn't exist before removing blocks. */ if (!seaf_repo_manager_repo_exists (seaf->repo_mgr, repo_id)) { if (!dry_run) { seaf_message ("Start to GC deleted repo %s.\n", repo_id); // Remove commit task = g_new0 (RemoveTask, 1); task->repo_id = repo_id; task->remove_type = COMMIT; g_thread_pool_push (tpool, task, NULL); // Remove fs task = g_new0 (RemoveTask, 1); task->repo_id = repo_id; task->remove_type = FS; g_thread_pool_push (tpool, task, NULL); // Remove block task = g_new0 (RemoveTask, 1); task->repo_id = repo_id; task->remove_type = BLOCK; g_thread_pool_push (tpool, task, NULL); n_tasks += 3; dup_id = g_strdup (repo_id); g_hash_table_insert (deleted, dup_id, dup_id); } else { seaf_message ("Repo %s can be GC'ed.\n", repo_id); } } } while (n_tasks > 0 && (task = g_async_queue_pop (async_queue))) { n_tasks--; if (!task->success) { if (g_hash_table_lookup (deleted, task->repo_id)) { g_hash_table_remove(deleted, task->repo_id); } } g_free (task); } if (!dry_run) { g_hash_table_iter_init (&iter, deleted); while (g_hash_table_iter_next (&iter, &key, &value)) { seaf_repo_manager_remove_garbage_repo (seaf->repo_mgr, (char *)key); } } out: g_hash_table_destroy (deleted); if (tpool) g_thread_pool_free (tpool, TRUE, TRUE); if (async_queue) g_async_queue_unref (async_queue); string_list_free (del_repos); } 
typedef struct GCRepoParam { int dry_run; int verbose; int rm_fs; gboolean online; GAsyncQueue *async_queue; } GCRepoParam; typedef struct GCRepo { SeafRepo *repo; gint64 gc_ret; } GCRepo; static void free_gc_repo (GCRepo *gc_repo) { if (!gc_repo) return; seaf_repo_unref (gc_repo->repo); g_free (gc_repo); } static void gc_repo_cb (gpointer data, gpointer user_data) { GCRepo *gc_repo = data; GCRepoParam *param = user_data; SeafRepo *repo = gc_repo->repo; seaf_message ("GC version %d repo %s(%s)\n", repo->version, repo->name, repo->id); gc_repo->gc_ret = gc_v1_repo (repo, param->dry_run, param->online, param->verbose, param->rm_fs); g_async_queue_push (param->async_queue, gc_repo); } int gc_core_run (GList *repo_id_list, const char *id_prefix, int dry_run, int verbose, int thread_num, int rm_fs) { GList *ptr; SeafRepo *repo; GList *corrupt_repos = NULL; GList *del_block_repos = NULL; gboolean del_garbage = FALSE; GAsyncQueue *async_queue = NULL; GCRepoParam *param = NULL; int tnum; GThreadPool *tpool = NULL; int gc_repo_num = 0; GCRepo *gc_repo = NULL; char *repo_id; gboolean online; if (seaf_db_type (seaf->db) == SEAF_DB_TYPE_SQLITE) { online = FALSE; seaf_message ("Database is SQLite, use offline GC.\n"); } else { online = TRUE; seaf_message ("Database is MySQL/Postgre/Oracle, use online GC.\n"); } async_queue = g_async_queue_new (); if (!async_queue) { seaf_warning ("Failed to create async queue, stop gc.\n"); return -1; } param = g_new0 (GCRepoParam, 1); param->dry_run = dry_run; param->verbose = verbose; param->rm_fs = rm_fs; param->online = online; param->async_queue = async_queue; tnum = thread_num <= 0 ? 
MAX_THREADS : thread_num; tpool = g_thread_pool_new (gc_repo_cb, param, tnum, FALSE, NULL); if (!tpool) { seaf_warning ("Failed to create thread pool, stop gc.\n"); g_async_queue_unref (async_queue); g_free (param); return -1; } seaf_message ("Using up to %d threads to run GC.\n", tnum); if (id_prefix) { if (repo_id_list) g_list_free (repo_id_list); repo_id_list = seaf_repo_manager_get_repo_id_list_by_prefix (seaf->repo_mgr, id_prefix); del_garbage = TRUE; } else if (repo_id_list == NULL) { repo_id_list = seaf_repo_manager_get_repo_id_list (seaf->repo_mgr); del_garbage = TRUE; } for (ptr = repo_id_list; ptr; ptr = ptr->next) { repo = seaf_repo_manager_get_repo_ex (seaf->repo_mgr, (const gchar *)ptr->data); g_free (ptr->data); if (!repo) continue; if (repo->is_corrupted) { corrupt_repos = g_list_prepend (corrupt_repos, g_strdup(repo->id)); seaf_message ("Repo %s is damaged, skip GC.\n\n", repo->id); seaf_repo_unref (repo); continue; } if (!repo->is_virtual) { gc_repo = g_new0 (GCRepo, 1); gc_repo->repo = repo; g_thread_pool_push (tpool, gc_repo, NULL); gc_repo_num++; } else { seaf_repo_unref (repo); } } g_list_free (repo_id_list); while (gc_repo_num > 0 && (gc_repo = g_async_queue_pop (async_queue))) { if (gc_repo->gc_ret < 0) { corrupt_repos = g_list_prepend (corrupt_repos, g_strdup(gc_repo->repo->id)); } else if (dry_run && gc_repo->gc_ret) { del_block_repos = g_list_prepend (del_block_repos, g_strdup(gc_repo->repo->id)); } free_gc_repo (gc_repo); gc_repo_num--; } if (del_garbage) { delete_garbaged_repos (dry_run, tnum); } seaf_message ("=== GC is finished ===\n"); if (corrupt_repos) { seaf_message ("The following repos are damaged. 
" "You can run seaf-fsck to fix them.\n"); for (ptr = corrupt_repos; ptr; ptr = ptr->next) { repo_id = ptr->data; seaf_message ("%s\n", repo_id); g_free (repo_id); } g_list_free (corrupt_repos); } if (del_block_repos) { printf("\n"); seaf_message ("The following repos have blocks to be removed:\n"); for (ptr = del_block_repos; ptr; ptr = ptr->next) { repo_id = ptr->data; seaf_message ("%s\n", repo_id); g_free (repo_id); } g_list_free (del_block_repos); } g_thread_pool_free (tpool, TRUE, TRUE); g_async_queue_unref (async_queue); g_free (param); return 0; } ================================================ FILE: server/gc/gc-core.h ================================================ #ifndef GC_CORE_H #define GC_CORE_H int gc_core_run (GList *repo_id_list, const char *id_prefix, int dry_run, int verbose, int thread_num, int rm_fs); void delete_garbaged_repos (int dry_run, int thread_num); #endif ================================================ FILE: server/gc/repo-mgr.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include "common.h" #include #include "utils.h" #include "log.h" #include "seafile-session.h" #include "commit-mgr.h" #include "branch-mgr.h" #include "repo-mgr.h" #include "fs-mgr.h" #include "seafile-error.h" #include "seaf-db.h" #define INDEX_DIR "index" struct _SeafRepoManagerPriv { }; static SeafRepo * load_repo (SeafRepoManager *manager, const char *repo_id, gboolean ret_corrupt); gboolean is_repo_id_valid (const char *id) { if (!id) return FALSE; return is_uuid_valid (id); } SeafRepo* seaf_repo_new (const char *id, const char *name, const char *desc) { SeafRepo* repo; /* valid check */ repo = g_new0 (SeafRepo, 1); memcpy (repo->id, id, 36); repo->id[36] = '\0'; repo->name = g_strdup(name); repo->desc = g_strdup(desc); repo->ref_cnt = 1; return repo; } void seaf_repo_free (SeafRepo *repo) { if (repo->name) g_free (repo->name); if (repo->desc) g_free (repo->desc); if 
(repo->category) g_free (repo->category); if (repo->head) seaf_branch_unref (repo->head); g_free (repo->pwd_hash_algo); g_free (repo->pwd_hash_params); g_free (repo); } void seaf_repo_ref (SeafRepo *repo) { g_atomic_int_inc (&repo->ref_cnt); } void seaf_repo_unref (SeafRepo *repo) { if (!repo) return; if (g_atomic_int_dec_and_test (&repo->ref_cnt)) seaf_repo_free (repo); } static void set_head_common (SeafRepo *repo, SeafBranch *branch) { if (repo->head) seaf_branch_unref (repo->head); repo->head = branch; seaf_branch_ref(branch); } void seaf_repo_from_commit (SeafRepo *repo, SeafCommit *commit) { repo->name = g_strdup (commit->repo_name); repo->desc = g_strdup (commit->repo_desc); repo->encrypted = commit->encrypted; repo->repaired = commit->repaired; if (repo->encrypted) { repo->enc_version = commit->enc_version; if (repo->enc_version == 1 && !commit->pwd_hash_algo) memcpy (repo->magic, commit->magic, 32); else if (repo->enc_version == 2) { memcpy (repo->random_key, commit->random_key, 96); } else if (repo->enc_version == 3) { memcpy (repo->random_key, commit->random_key, 96); memcpy (repo->salt, commit->salt, 64); } else if (repo->enc_version == 4) { memcpy (repo->random_key, commit->random_key, 96); memcpy (repo->salt, commit->salt, 64); } if (repo->enc_version >= 2 && !commit->pwd_hash_algo) { memcpy (repo->magic, commit->magic, 64); } if (commit->pwd_hash_algo) { memcpy (repo->pwd_hash, commit->pwd_hash, 64); repo->pwd_hash_algo = g_strdup (commit->pwd_hash_algo); repo->pwd_hash_params = g_strdup (commit->pwd_hash_params); } } repo->no_local_history = commit->no_local_history; repo->version = commit->version; } void seaf_repo_to_commit (SeafRepo *repo, SeafCommit *commit) { commit->repo_name = g_strdup (repo->name); commit->repo_desc = g_strdup (repo->desc); commit->encrypted = repo->encrypted; commit->repaired = repo->repaired; if (commit->encrypted) { commit->enc_version = repo->enc_version; if (commit->enc_version == 1 && !repo->pwd_hash_algo) 
commit->magic = g_strdup (repo->magic); else if (commit->enc_version == 2) { commit->random_key = g_strdup (repo->random_key); } else if (commit->enc_version == 3) { commit->random_key = g_strdup (repo->random_key); commit->salt = g_strdup (repo->salt); } else if (commit->enc_version == 4) { commit->random_key = g_strdup (repo->random_key); commit->salt = g_strdup (repo->salt); } if (commit->enc_version >= 2 && !repo->pwd_hash_algo) { commit->magic = g_strdup (repo->magic); } if (repo->pwd_hash_algo) { commit->pwd_hash = g_strdup (repo->pwd_hash); commit->pwd_hash_algo = g_strdup (repo->pwd_hash_algo); commit->pwd_hash_params = g_strdup (repo->pwd_hash_params); } } commit->no_local_history = repo->no_local_history; commit->version = repo->version; } static gboolean collect_commit (SeafCommit *commit, void *vlist, gboolean *stop) { GList **commits = vlist; /* The traverse function will unref the commit, so we need to ref it. */ seaf_commit_ref (commit); *commits = g_list_prepend (*commits, commit); return TRUE; } GList * seaf_repo_get_commits (SeafRepo *repo) { GList *branches; GList *ptr; SeafBranch *branch; GList *commits = NULL; branches = seaf_branch_manager_get_branch_list (seaf->branch_mgr, repo->id); if (branches == NULL) { seaf_warning ("Failed to get branch list of repo %s.\n", repo->id); return NULL; } for (ptr = branches; ptr != NULL; ptr = ptr->next) { branch = ptr->data; gboolean res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr, repo->id, repo->version, branch->commit_id, collect_commit, &commits, FALSE); if (!res) { for (ptr = commits; ptr != NULL; ptr = ptr->next) seaf_commit_unref ((SeafCommit *)(ptr->data)); g_list_free (commits); goto out; } } commits = g_list_reverse (commits); out: for (ptr = branches; ptr != NULL; ptr = ptr->next) { seaf_branch_unref ((SeafBranch *)ptr->data); } return commits; } SeafRepoManager* seaf_repo_manager_new (SeafileSession *seaf) { SeafRepoManager *mgr = g_new0 (SeafRepoManager, 1); mgr->priv = g_new0 
(SeafRepoManagerPriv, 1);
    mgr->seaf = seaf;
    return mgr;
}

/* No-op in the GC tool; kept so callers match the full server's API. */
int
seaf_repo_manager_init (SeafRepoManager *mgr)
{
    return 0;
}

/* No-op in the GC tool; kept so callers match the full server's API. */
int
seaf_repo_manager_start (SeafRepoManager *mgr)
{
    return 0;
}

/* Return TRUE if a row for `id` exists in the Repo table.
 * DB errors are swallowed (db_err stays local). */
static gboolean
repo_exists_in_db (SeafDB *db, const char *id)
{
    char sql[256];
    gboolean db_err = FALSE;

    /* NOTE(review): repo_id is interpolated into the SQL string directly;
     * callers check length (< 37) but not the character set — confirm ids
     * are always canonical UUIDs upstream. */
    snprintf (sql, sizeof(sql), "SELECT repo_id FROM Repo WHERE repo_id = '%s'",
              id);
    return seaf_db_check_for_existence (db, sql, &db_err);
}

/* Like repo_exists_in_db() but reports DB errors through *db_err. */
static gboolean
repo_exists_in_db_ex (SeafDB *db, const char *id, gboolean *db_err)
{
    char sql[256];

    snprintf (sql, sizeof(sql), "SELECT repo_id FROM Repo WHERE repo_id = '%s'",
              id);
    return seaf_db_check_for_existence (db, sql, db_err);
}

/* Load a repo by id. Returns NULL when the id is too long, the repo is
 * unknown, or the loaded repo turned out corrupted (ret_corrupt=FALSE). */
SeafRepo*
seaf_repo_manager_get_repo (SeafRepoManager *manager, const gchar *id)
{
    SeafRepo repo;    /* NOTE(review): only repo.id is written; value never read */
    int len = strlen(id);

    if (len >= 37)
        return NULL;

    memcpy (repo.id, id, len + 1);

    if (repo_exists_in_db (manager->seaf->db, id)) {
        SeafRepo *ret = load_repo (manager, id, FALSE);
        if (!ret)
            return NULL;
        /* seaf_repo_ref (ret); */
        return ret;
    }

    return NULL;
}

/* Like seaf_repo_manager_get_repo() but: on DB error returns a stub repo
 * marked is_corrupted, and corrupted repos are returned instead of NULL. */
SeafRepo*
seaf_repo_manager_get_repo_ex (SeafRepoManager *manager, const gchar *id)
{
    int len = strlen(id);
    gboolean db_err = FALSE, exists;
    SeafRepo *ret = NULL;

    if (len >= 37)
        return NULL;

    exists = repo_exists_in_db_ex (manager->seaf->db, id, &db_err);
    if (db_err) {
        ret = seaf_repo_new(id, NULL, NULL);
        ret->is_corrupted = TRUE;
        return ret;
    }

    if (exists) {
        ret = load_repo (manager, id, TRUE);
        return ret;
    }

    return NULL;
}

gboolean
seaf_repo_manager_repo_exists (SeafRepoManager *manager, const gchar *id)
{
    SeafRepo repo;
    /* NOTE(review): copies 37 bytes without checking strlen(id) first
     * (unlike the getters above), and the copy is never read — potential
     * over-read for short ids; candidate for removal. */
    memcpy (repo.id, id, 37);
    return repo_exists_in_db (manager->seaf->db, id);
}

/* Resolve `branch` to its head commit and populate `repo` from it.
 * Marks the repo corrupted when the commit object is missing. */
static void
load_repo_commit (SeafRepoManager *manager,
                  SeafRepo *repo,
                  SeafBranch *branch)
{
    SeafCommit *commit;

    commit = seaf_commit_manager_get_commit_compatible (manager->seaf->commit_mgr,
                                                        repo->id,
                                                        branch->commit_id);
    if (!commit) {
        seaf_warning ("Commit %s is missing\n", branch->commit_id);
        repo->is_corrupted = TRUE;
        return;
    }

    set_head_common (repo, branch);
    seaf_repo_from_commit (repo, commit);
    seaf_commit_unref (commit);
}

/* Load a repo object from its master branch's head commit.
 * ret_corrupt: when TRUE, a corrupted repo object is returned (caller
 * checks is_corrupted); when FALSE, corrupted repos yield NULL.
 * Also resolves store_id: virtual repos share fs/block store with their
 * origin repo. */
static SeafRepo *
load_repo (SeafRepoManager *manager, const char *repo_id, gboolean ret_corrupt)
{
    SeafRepo *repo;
    SeafBranch *branch;
    SeafVirtRepo *vinfo = NULL;

    repo = seaf_repo_new(repo_id, NULL, NULL);
    if (!repo) {
        seaf_warning ("[repo mgr] failed to alloc repo.\n");
        return NULL;
    }

    repo->manager = manager;

    branch = seaf_branch_manager_get_branch (seaf->branch_mgr, repo_id, "master");
    if (!branch) {
        seaf_warning ("Failed to get master branch of repo %.8s.\n", repo_id);
        repo->is_corrupted = TRUE;
    } else {
        load_repo_commit (manager, repo, branch);
        seaf_branch_unref (branch);
    }

    if (repo->is_corrupted) {
        if (!ret_corrupt) {
            seaf_repo_free (repo);
            return NULL;
        }
        return repo;
    }

    vinfo = seaf_repo_manager_get_virtual_repo_info (manager, repo_id);
    if (vinfo) {
        repo->is_virtual = TRUE;
        /* store_id buffer is 37 bytes and repo was zero-allocated, so the
         * 36-byte copy stays NUL-terminated. */
        memcpy (repo->store_id, vinfo->origin_repo_id, 36);
    } else {
        repo->is_virtual = FALSE;
        memcpy (repo->store_id, repo->id, 36);
    }
    seaf_virtual_repo_info_free (vinfo);

    return repo;
}

/* Row callback: prepend a duplicated repo_id onto the result list. */
static gboolean
collect_repo_id (SeafDBRow *row, void *data)
{
    GList **p_ids = data;
    const char *repo_id;

    repo_id = seaf_db_row_get_column_text (row, 0);
    *p_ids = g_list_prepend (*p_ids, g_strdup(repo_id));

    return TRUE;
}

/* Return all repo ids (newly allocated strings), or NULL on DB error. */
GList *
seaf_repo_manager_get_repo_id_list (SeafRepoManager *mgr)
{
    GList *ret = NULL;
    char sql[256];

    snprintf (sql, 256, "SELECT repo_id FROM Repo");

    if (seaf_db_foreach_selected_row (mgr->seaf->db, sql,
                                      collect_repo_id, &ret) < 0)
        return NULL;

    return ret;
}

/* Return repo ids whose id starts with `prefix`, or NULL on DB error. */
GList *
seaf_repo_manager_get_repo_id_list_by_prefix (SeafRepoManager *mgr,
                                              const char *prefix)
{
    GList *ret = NULL;
    char sql[256];

    snprintf (sql, 256, "SELECT repo_id FROM Repo WHERE repo_id LIKE '%s%%'",
              prefix);

    if (seaf_db_foreach_selected_row (mgr->seaf->db, sql,
                                      collect_repo_id, &ret) < 0) {
        return NULL;
    }

    return ret;
}

/* List loaded repos; start/limit of -1/-1 means "all". DB failure is
 * reported through *error (continued below). */
GList *
seaf_repo_manager_get_repo_list (SeafRepoManager *mgr, int start, int limit,
                                 gboolean *error)
{
    char sql[256];
    GList *id_list = NULL, *ptr;
GList *ret = NULL; SeafRepo *repo; *error = FALSE; if (start == -1 && limit == -1) snprintf (sql, 256, "SELECT repo_id FROM Repo"); else snprintf (sql, 256, "SELECT repo_id FROM Repo LIMIT %d, %d", start, limit); if (seaf_db_foreach_selected_row (mgr->seaf->db, sql, collect_repo_id, &id_list) < 0) goto error; for (ptr = id_list; ptr; ptr = ptr->next) { char *repo_id = ptr->data; repo = seaf_repo_manager_get_repo_ex (mgr, repo_id); if (repo) ret = g_list_prepend (ret, repo); } string_list_free (id_list); return ret; error: *error = TRUE; string_list_free (id_list); return NULL; } int seaf_repo_manager_set_repo_history_limit (SeafRepoManager *mgr, const char *repo_id, int days) { SeafVirtRepo *vinfo; SeafDB *db = mgr->seaf->db; char sql[256]; vinfo = seaf_repo_manager_get_virtual_repo_info (mgr, repo_id); if (vinfo) { seaf_virtual_repo_info_free (vinfo); return 0; } if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) { gboolean err; snprintf(sql, sizeof(sql), "SELECT repo_id FROM RepoHistoryLimit " "WHERE repo_id='%s'", repo_id); if (seaf_db_check_for_existence(db, sql, &err)) snprintf(sql, sizeof(sql), "UPDATE RepoHistoryLimit SET days=%d" "WHERE repo_id='%s'", days, repo_id); else snprintf(sql, sizeof(sql), "INSERT INTO RepoHistoryLimit (repo_id, days) VALUES " "('%s', %d)", repo_id, days); if (err) return -1; return seaf_db_query(db, sql); } else { snprintf (sql, sizeof(sql), "REPLACE INTO RepoHistoryLimit (repo_id, days) VALUES ('%s', %d)", repo_id, days); if (seaf_db_query (db, sql) < 0) return -1; } return 0; } static gboolean get_limit (SeafDBRow *row, void *vdays) { int *days = vdays; *days = seaf_db_row_get_column_int (row, 0); return FALSE; } int seaf_repo_manager_get_repo_history_limit (SeafRepoManager *mgr, const char *repo_id) { SeafVirtRepo *vinfo; const char *r_repo_id = repo_id; char sql[256]; int per_repo_days = -1; int ret; vinfo = seaf_repo_manager_get_virtual_repo_info (mgr, repo_id); if (vinfo) r_repo_id = vinfo->origin_repo_id; snprintf (sql, 
sizeof(sql), "SELECT days FROM RepoHistoryLimit WHERE repo_id='%s'", r_repo_id); seaf_virtual_repo_info_free (vinfo); /* We don't use seaf_db_get_int() because we need to differ DB error * from not exist. * We can't just return global config value if DB error occured, * since the global value may be smaller than per repo one. * This can lead to data lose in GC. */ ret = seaf_db_foreach_selected_row (mgr->seaf->db, sql, get_limit, &per_repo_days); if (ret == 0) { /* If per repo value is not set, return the global one. */ per_repo_days = seaf_cfg_manager_get_config_int (mgr->seaf->cfg_mgr, "history", "keep_days"); } if (per_repo_days < 0) { per_repo_days = -1; } return per_repo_days; } int seaf_repo_manager_set_repo_valid_since (SeafRepoManager *mgr, const char *repo_id, gint64 timestamp) { SeafDB *db = mgr->seaf->db; char sql[256]; if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) { gboolean err; snprintf(sql, sizeof(sql), "SELECT repo_id FROM RepoValidSince WHERE " "repo_id='%s'", repo_id); if (seaf_db_check_for_existence(db, sql, &err)) snprintf(sql, sizeof(sql), "UPDATE RepoValidSince SET timestamp=%"G_GINT64_FORMAT " WHERE repo_id='%s'", timestamp, repo_id); else snprintf(sql, sizeof(sql), "INSERT INTO RepoValidSince (repo_id, timestamp) VALUES " "('%s', %"G_GINT64_FORMAT")", repo_id, timestamp); if (err) return -1; if (seaf_db_query (db, sql) < 0) return -1; } else { snprintf (sql, sizeof(sql), "REPLACE INTO RepoValidSince (repo_id, timestamp) VALUES ('%s', %"G_GINT64_FORMAT")", repo_id, timestamp); if (seaf_db_query (db, sql) < 0) return -1; } return 0; } gint64 seaf_repo_manager_get_repo_valid_since (SeafRepoManager *mgr, const char *repo_id) { char sql[256]; snprintf (sql, sizeof(sql), "SELECT timestamp FROM RepoValidSince WHERE repo_id='%s'", repo_id); /* Also return -1 if DB error. 
*/ return seaf_db_get_int64 (mgr->seaf->db, sql); } gint64 seaf_repo_manager_get_repo_truncate_time (SeafRepoManager *mgr, const char *repo_id) { int days; gint64 timestamp; days = seaf_repo_manager_get_repo_history_limit (mgr, repo_id); timestamp = seaf_repo_manager_get_repo_valid_since (mgr, repo_id); gint64 now = (gint64)time(NULL); if (days > 0) return MAX (now - days * 24 * 3600, timestamp); else if (days < 0) return timestamp; else return 0; } static gboolean load_virtual_info (SeafDBRow *row, void *p_vinfo) { SeafVirtRepo *vinfo; const char *origin_repo_id, *path, *base_commit; origin_repo_id = seaf_db_row_get_column_text (row, 0); path = seaf_db_row_get_column_text (row, 1); base_commit = seaf_db_row_get_column_text (row, 2); vinfo = g_new0 (SeafVirtRepo, 1); memcpy (vinfo->origin_repo_id, origin_repo_id, 36); vinfo->path = g_strdup(path); memcpy (vinfo->base_commit, base_commit, 40); *((SeafVirtRepo **)p_vinfo) = vinfo; return FALSE; } SeafVirtRepo * seaf_repo_manager_get_virtual_repo_info (SeafRepoManager *mgr, const char *repo_id) { char sql[256]; SeafVirtRepo *vinfo = NULL; snprintf (sql, 256, "SELECT origin_repo, path, base_commit FROM VirtualRepo " "WHERE repo_id = '%s'", repo_id); seaf_db_foreach_selected_row (seaf->db, sql, load_virtual_info, &vinfo); return vinfo; } void seaf_virtual_repo_info_free (SeafVirtRepo *vinfo) { if (!vinfo) return; g_free (vinfo->path); g_free (vinfo); } static gboolean collect_virtual_repo_ids (SeafDBRow *row, void *data) { GList **p_ids = data; const char *repo_id; repo_id = seaf_db_row_get_column_text (row, 0); *p_ids = g_list_prepend (*p_ids, g_strdup(repo_id)); return TRUE; } GList * seaf_repo_manager_get_virtual_repo_ids_by_origin (SeafRepoManager *mgr, const char *origin_repo) { GList *ret = NULL; char sql[256]; snprintf (sql, 256, "SELECT repo_id FROM VirtualRepo WHERE origin_repo='%s'", origin_repo); if (seaf_db_foreach_selected_row (mgr->seaf->db, sql, collect_virtual_repo_ids, &ret) < 0) { return NULL; } return 
g_list_reverse (ret); } static gboolean get_garbage_repo_id (SeafDBRow *row, void *vid_list) { GList **ret = vid_list; char *repo_id; repo_id = g_strdup(seaf_db_row_get_column_text (row, 0)); *ret = g_list_prepend (*ret, repo_id); return TRUE; } GList * seaf_repo_manager_list_garbage_repos (SeafRepoManager *mgr) { GList *repo_ids = NULL; seaf_db_foreach_selected_row (seaf->db, "SELECT repo_id FROM GarbageRepos", get_garbage_repo_id, &repo_ids); return repo_ids; } void seaf_repo_manager_remove_garbage_repo (SeafRepoManager *mgr, const char *repo_id) { char sql[256]; snprintf (sql, sizeof(sql), "DELETE FROM GarbageRepos WHERE repo_id='%s'", repo_id); seaf_db_query (seaf->db, sql); } ================================================ FILE: server/gc/repo-mgr.h ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef SEAF_REPO_MGR_H #define SEAF_REPO_MGR_H #include #include "seafile-object.h" #include "commit-mgr.h" #include "branch-mgr.h" struct _SeafRepoManager; typedef struct _SeafRepo SeafRepo; typedef struct SeafVirtRepo { char origin_repo_id[37]; char *path; char base_commit[41]; } SeafVirtRepo; struct _SeafRepo { struct _SeafRepoManager *manager; gchar id[37]; gchar *name; gchar *desc; gchar *category; /* not used yet */ gboolean encrypted; int enc_version; gchar magic[65]; /* hash(repo_id + passwd), key stretched. */ gchar pwd_hash[65]; /* hash(repo_id + passwd), key stretched. */ gchar *pwd_hash_algo; gchar *pwd_hash_params; gchar random_key[97]; gchar salt[65]; gboolean no_local_history; SeafBranch *head; gboolean is_corrupted; gboolean repaired; gboolean delete_pending; int ref_cnt; int version; /* Used to access fs and block sotre. * This id is different from repo_id when this repo is virtual. * Virtual repos share fs and block store with its origin repo. * However, commit store for each repo is always independent. * So always use repo_id to access commit store. 
*/ gchar store_id[37]; gboolean is_virtual; }; gboolean is_repo_id_valid (const char *id); SeafRepo* seaf_repo_new (const char *id, const char *name, const char *desc); void seaf_repo_free (SeafRepo *repo); void seaf_repo_ref (SeafRepo *repo); void seaf_repo_unref (SeafRepo *repo); void seaf_repo_to_commit (SeafRepo *repo, SeafCommit *commit); void seaf_repo_from_commit (SeafRepo *repo, SeafCommit *commit); void seaf_virtual_repo_info_free (SeafVirtRepo *vinfo); typedef struct _SeafRepoManager SeafRepoManager; typedef struct _SeafRepoManagerPriv SeafRepoManagerPriv; struct _SeafRepoManager { struct _SeafileSession *seaf; SeafRepoManagerPriv *priv; }; SeafRepoManager* seaf_repo_manager_new (struct _SeafileSession *seaf); int seaf_repo_manager_init (SeafRepoManager *mgr); int seaf_repo_manager_start (SeafRepoManager *mgr); int seaf_repo_manager_add_repo (SeafRepoManager *mgr, SeafRepo *repo); int seaf_repo_manager_del_repo (SeafRepoManager *mgr, SeafRepo *repo); SeafRepo* seaf_repo_manager_get_repo (SeafRepoManager *manager, const gchar *id); SeafRepo* seaf_repo_manager_get_repo_ex (SeafRepoManager *manager, const gchar *id); gboolean seaf_repo_manager_repo_exists (SeafRepoManager *manager, const gchar *id); GList* seaf_repo_manager_get_repo_list (SeafRepoManager *mgr, int start, int limit, gboolean *error); GList * seaf_repo_manager_get_repo_id_list (SeafRepoManager *mgr); GList * seaf_repo_manager_get_repo_id_list_by_prefix (SeafRepoManager *mgr, const char *prefix); int seaf_repo_manager_set_repo_history_limit (SeafRepoManager *mgr, const char *repo_id, int days); int seaf_repo_manager_get_repo_history_limit (SeafRepoManager *mgr, const char *repo_id); int seaf_repo_manager_set_repo_valid_since (SeafRepoManager *mgr, const char *repo_id, gint64 timestamp); gint64 seaf_repo_manager_get_repo_valid_since (SeafRepoManager *mgr, const char *repo_id); /* * Return the timestamp to stop traversing history. 
* Returns > 0 if traverse a period of history; * Returns = 0 if only traverse the head commit; * Returns < 0 if traverse full history. */ gint64 seaf_repo_manager_get_repo_truncate_time (SeafRepoManager *mgr, const char *repo_id); SeafVirtRepo * seaf_repo_manager_get_virtual_repo_info (SeafRepoManager *mgr, const char *repo_id); void seaf_virtual_repo_info_free (SeafVirtRepo *vinfo); GList * seaf_repo_manager_get_virtual_repo_ids_by_origin (SeafRepoManager *mgr, const char *origin_repo); GList * seaf_repo_manager_list_garbage_repos (SeafRepoManager *mgr); void seaf_repo_manager_remove_garbage_repo (SeafRepoManager *mgr, const char *repo_id); #endif ================================================ FILE: server/gc/seaf-fsck.c ================================================ #include "common.h" #include "log.h" #include #include "seafile-session.h" #include "fsck.h" #include "utils.h" static char *ccnet_dir = NULL; static char *seafile_dir = NULL; static char *central_config_dir = NULL; SeafileSession *seaf; static const char *short_opts = "hvft:c:d:rE:F:sS"; static const struct option long_opts[] = { { "help", no_argument, NULL, 'h', }, { "version", no_argument, NULL, 'v', }, { "force", no_argument, NULL, 'f', }, { "repair", no_argument, NULL, 'r', }, { "threads", required_argument, NULL, 't', }, { "export", required_argument, NULL, 'E', }, { "config-file", required_argument, NULL, 'c', }, { "central-config-dir", required_argument, NULL, 'F' }, { "seafdir", required_argument, NULL, 'd', }, { "shallow", no_argument, NULL, 's', }, { "check-file-size", no_argument, NULL, 'S' }, { 0, 0, 0, 0, }, }; static void usage () { fprintf (stderr, "usage: seaf-fsck [-r] [-E exported_path] [-c config_dir] [-d seafile_dir] " "[repo_id_1 [repo_id_2 ...]]\n"); } #ifdef WIN32 /* Get the commandline arguments in unicode, then convert them to utf8 */ static char ** get_argv_utf8 (int *argc) { int i = 0; char **argv = NULL; const wchar_t *cmdline = NULL; wchar_t **argv_w = NULL; cmdline = 
GetCommandLineW(); argv_w = CommandLineToArgvW (cmdline, argc); if (!argv_w) { printf("failed to CommandLineToArgvW(), GLE=%lu\n", GetLastError()); return NULL; } argv = (char **)malloc (sizeof(char*) * (*argc)); for (i = 0; i < *argc; i++) { argv[i] = wchar_to_utf8 (argv_w[i]); } return argv; } #endif #ifdef __linux__ /* Compare the owner uid of the seafile-data dir with the current uid. */ static gboolean check_user (const char *seafile_dir, uid_t *current_user, uid_t *seafile_user) { struct stat st; uid_t euid; if (stat (seafile_dir, &st) < 0) { seaf_warning ("Failed to stat seafile data dir %s: %s\n", seafile_dir, strerror(errno)); return FALSE; } euid = geteuid(); *current_user = euid; *seafile_user = st.st_uid; return (euid == st.st_uid); } #endif /* __linux__ */ int main(int argc, char *argv[]) { int c; gboolean repair = FALSE; gboolean force = FALSE; gboolean check_integrity = TRUE; gboolean check_file_size = FALSE; char *export_path = NULL; int max_thread_num = 0; #ifdef WIN32 argv = get_argv_utf8 (&argc); #endif ccnet_dir = DEFAULT_CONFIG_DIR; while ((c = getopt_long(argc, argv, short_opts, long_opts, NULL)) != EOF) { switch (c) { case 'h': usage(); exit(0); case 'v': exit(-1); break; case 'f': force = TRUE; break; case 't': max_thread_num = atoi(strdup(optarg)); break; case 'r': repair = TRUE; break; case 'E': export_path = strdup(optarg); break; case 'c': ccnet_dir = strdup(optarg); break; case 'd': seafile_dir = strdup(optarg); break; case 'F': central_config_dir = strdup(optarg); break; case 'S': check_file_size = TRUE; break; case 's': check_integrity = FALSE; break; default: usage(); exit(-1); } } #if !GLIB_CHECK_VERSION(2, 35, 0) g_type_init(); #endif if (seafile_log_init ("-", "info", "debug", "seaf-fsck") < 0) { fprintf (stderr, "Failed to init log.\n"); exit (1); } if (seafile_dir == NULL) seafile_dir = g_build_filename (ccnet_dir, "seafile-data", NULL); #ifdef __linux__ uid_t current_user, seafile_user; if (!export_path && !force && !check_user 
(seafile_dir, ¤t_user, &seafile_user)) { seaf_message ("Current user (%u) is not the user for running " "seafile server (%u). Unable to run fsck.\n", current_user, seafile_user); exit(1); } #endif seaf = seafile_session_new(central_config_dir, seafile_dir, ccnet_dir, export_path == NULL); if (!seaf) { seaf_warning ("Failed to create seafile session.\n"); exit (1); } GList *repo_id_list = NULL; int i; for (i = optind; i < argc; i++) repo_id_list = g_list_append (repo_id_list, g_strdup(argv[i])); if (export_path) { export_file (repo_id_list, seafile_dir, export_path); } else { FsckOptions options; memset (&options, 0, sizeof(FsckOptions)); options.max_thread_num = max_thread_num; options.check_integrity = check_integrity; options.check_file_size = check_file_size; options.repair = repair; seaf_fsck (repo_id_list, &options); } return 0; } ================================================ FILE: server/gc/seafile-session.c ================================================ #include "common.h" #include #include #include #include #include "seafile-session.h" #include "seaf-utils.h" #include "log.h" SeafileSession * seafile_session_new(const char *central_config_dir, const char *seafile_dir, const char *ccnet_dir, gboolean need_db) { char *abs_central_config_dir = NULL; char *abs_seafile_dir; char *abs_ccnet_dir; char *tmp_file_dir; char *config_file_path; struct stat st; GKeyFile *config; SeafileSession *session = NULL; abs_seafile_dir = ccnet_expand_path (seafile_dir); abs_ccnet_dir = ccnet_expand_path (ccnet_dir); tmp_file_dir = g_build_filename (abs_seafile_dir, "tmpfiles", NULL); if (central_config_dir) { abs_central_config_dir = ccnet_expand_path (central_config_dir); } const char *confdir = abs_central_config_dir ? 
abs_central_config_dir : abs_seafile_dir; config_file_path = g_build_filename(confdir, "seafile.conf", NULL); if (g_stat(confdir, &st) < 0 || !S_ISDIR(st.st_mode)) { seaf_warning ("Config dir dir %s does not exist\n", abs_seafile_dir); goto onerror; } if (g_stat(abs_seafile_dir, &st) < 0 || !S_ISDIR(st.st_mode)) { seaf_warning ("Seafile data dir %s does not exist\n", abs_seafile_dir); goto onerror; } if (g_stat(tmp_file_dir, &st) < 0 || !S_ISDIR(st.st_mode)) { seaf_warning ("Seafile tmp dir %s does not exist\n", tmp_file_dir); goto onerror; } GError *error = NULL; config = g_key_file_new (); if (!g_key_file_load_from_file (config, config_file_path, G_KEY_FILE_NONE, &error)) { seaf_warning ("Failed to load config file.\n"); g_key_file_free (config); goto onerror; } session = g_new0(SeafileSession, 1); session->seaf_dir = abs_seafile_dir; session->ccnet_dir = abs_ccnet_dir; session->tmp_file_dir = tmp_file_dir; session->config = config; if (need_db) { if (load_database_config (session) < 0) { seaf_warning ("Failed to load database config.\n"); goto onerror; } } session->cfg_mgr = seaf_cfg_manager_new (session); if (!session->cfg_mgr) goto onerror; session->fs_mgr = seaf_fs_manager_new (session, abs_seafile_dir); if (!session->fs_mgr) goto onerror; session->block_mgr = seaf_block_manager_new (session, abs_seafile_dir); if (!session->block_mgr) goto onerror; session->commit_mgr = seaf_commit_manager_new (session); if (!session->commit_mgr) goto onerror; session->repo_mgr = seaf_repo_manager_new (session); if (!session->repo_mgr) goto onerror; session->branch_mgr = seaf_branch_manager_new (session); if (!session->branch_mgr) goto onerror; return session; onerror: free (abs_seafile_dir); g_free (tmp_file_dir); g_free (config_file_path); g_free (session); return NULL; } ================================================ FILE: server/gc/seafile-session.h ================================================ #ifndef SEAFILE_SESSION_H #define SEAFILE_SESSION_H #include #include 
#include "block-mgr.h" #include "fs-mgr.h" #include "commit-mgr.h" #include "branch-mgr.h" #include "repo-mgr.h" #include "db.h" #include "seaf-db.h" #include "config-mgr.h" typedef struct _SeafileSession SeafileSession; struct _SeafileSession { char *seaf_dir; char *ccnet_dir; char *tmp_file_dir; /* Config that's only loaded on start */ GKeyFile *config; SeafDB *db; SeafDB *ccnet_db; char *seahub_pk; SeafBlockManager *block_mgr; SeafFSManager *fs_mgr; SeafCommitManager *commit_mgr; SeafBranchManager *branch_mgr; SeafRepoManager *repo_mgr; SeafCfgManager *cfg_mgr; gboolean create_tables; gboolean ccnet_create_tables; }; extern SeafileSession *seaf; SeafileSession * seafile_session_new(const char *central_config_dir, const char *seafile_dir, const char *ccnet_dir, gboolean need_db); #endif ================================================ FILE: server/gc/seafserv-gc.c ================================================ #include "common.h" #include "log.h" #include #include "seafile-session.h" #include "seaf-utils.h" #include "gc-core.h" #include "verify.h" #include "utils.h" static char *ccnet_dir = NULL; static char *seafile_dir = NULL; static char *central_config_dir = NULL; SeafileSession *seaf; static const char *short_opts = "hvc:d:VDrRF:Ct:i:"; static const struct option long_opts[] = { { "help", no_argument, NULL, 'h', }, { "version", no_argument, NULL, 'v', }, { "config-file", required_argument, NULL, 'c', }, { "central-config-dir", required_argument, NULL, 'F' }, { "seafdir", required_argument, NULL, 'd', }, { "verbose", no_argument, NULL, 'V' }, { "dry-run", no_argument, NULL, 'D' }, { "rm-deleted", no_argument, NULL, 'r' }, { "rm-fs", no_argument, NULL, 'R' }, { "check", no_argument, NULL, 'C' }, { "thread-num", required_argument, NULL, 't', }, { "id-prefix", required_argument, NULL, 'i', }, { 0, 0, 0, 0 }, }; static void usage () { fprintf (stderr, "usage: seafserv-gc [-c config_dir] [-d seafile_dir] " "[repo_id_1 [repo_id_2 ...]]\n" "Additional options:\n" 
"-r, --rm-deleted: remove garbaged repos\n" "-R, --rm-fs: remove fs object\n" "-D, --dry-run: report blocks that can be remove, but not remove them\n" "-V, --verbose: verbose output messages\n" "-C, --check: check data integrity\n" "-t, --thread-num: thread number for gc repos\n"); } #ifdef WIN32 /* Get the commandline arguments in unicode, then convert them to utf8 */ static char ** get_argv_utf8 (int *argc) { int i = 0; char **argv = NULL; const wchar_t *cmdline = NULL; wchar_t **argv_w = NULL; cmdline = GetCommandLineW(); argv_w = CommandLineToArgvW (cmdline, argc); if (!argv_w) { printf("failed to CommandLineToArgvW(), GLE=%lu\n", GetLastError()); return NULL; } argv = (char **)malloc (sizeof(char*) * (*argc)); for (i = 0; i < *argc; i++) { argv[i] = wchar_to_utf8 (argv_w[i]); } return argv; } #endif #define DEFAULT_THREAD_NUM 10 int main(int argc, char *argv[]) { int c; int verbose = 0; int dry_run = 0; int rm_garbage = 0; int rm_fs = 0; int check_integrity = 0; int thread_num = 1; const char *debug_str = NULL; char *id_prefix = NULL; #ifdef WIN32 argv = get_argv_utf8 (&argc); #endif ccnet_dir = DEFAULT_CONFIG_DIR; while ((c = getopt_long(argc, argv, short_opts, long_opts, NULL)) != EOF) { switch (c) { case 'h': usage(); exit(0); case 'v': exit(-1); break; case 'c': ccnet_dir = strdup(optarg); break; case 'd': seafile_dir = strdup(optarg); break; case 'F': central_config_dir = strdup(optarg); break; case 'V': verbose = 1; break; case 'D': dry_run = 1; break; case 'r': rm_garbage = 1; break; case 'R': rm_fs = 1; break; case 'C': check_integrity = 1; break; case 't': thread_num = atoi(optarg); break; case 'i': id_prefix = g_strdup(optarg); break; default: usage(); exit(-1); } } #if !GLIB_CHECK_VERSION(2, 35, 0) g_type_init(); #endif if (!debug_str) debug_str = g_getenv("SEAFILE_DEBUG"); seafile_debug_set_flags_string (debug_str); if (seafile_log_init ("-", "info", "debug", "seafserv-gc") < 0) { fprintf (stderr, "Failed to init log.\n"); exit (1); } if 
(seafile_dir == NULL) seafile_dir = g_build_filename (ccnet_dir, "seafile-data", NULL); seaf = seafile_session_new(central_config_dir, seafile_dir, ccnet_dir, TRUE); if (!seaf) { seaf_warning ("Failed to create seafile session.\n"); exit (1); } if (rm_garbage) { delete_garbaged_repos (dry_run, thread_num); return 0; } GList *repo_id_list = NULL; int i; for (i = optind; i < argc; i++) repo_id_list = g_list_append (repo_id_list, g_strdup(argv[i])); if (check_integrity) { return verify_repos (repo_id_list); } gc_core_run (repo_id_list, id_prefix, dry_run, verbose, thread_num, rm_fs); g_free (id_prefix); return 0; } ================================================ FILE: server/gc/verify.c ================================================ #include "seafile-session.h" #include "utils.h" #include "log.h" typedef struct VerifyData { SeafRepo *repo; gint64 truncate_time; gboolean traversed_head; GHashTable *exist_blocks; gboolean traverse_base_commit; GHashTable *visited; GHashTable *visited_commits; } VerifyData; static int check_blocks (VerifyData *data, const char *file_id) { SeafRepo *repo = data->repo; Seafile *seafile; int i; seafile = seaf_fs_manager_get_seafile (seaf->fs_mgr, repo->store_id, repo->version, file_id); if (!seafile) { seaf_warning ("Failed to find file %s.\n", file_id); return -1; } for (i = 0; i < seafile->n_blocks; ++i) { if (!g_hash_table_lookup(data->exist_blocks, seafile->blk_sha1s[i])) { seaf_message ("Block %s is missing.\n", seafile->blk_sha1s[i]); } } seafile_unref (seafile); return 0; } static gboolean fs_callback (SeafFSManager *mgr, const char *store_id, int version, const char *obj_id, int type, void *user_data, gboolean *stop) { VerifyData *data = user_data; if (data->visited != NULL) { if (g_hash_table_lookup (data->visited, obj_id) != NULL) { *stop = TRUE; return TRUE; } char *key = g_strdup(obj_id); g_hash_table_replace (data->visited, key, key); } if (data->traverse_base_commit) { return TRUE; } if (type == SEAF_METADATA_TYPE_FILE && 
check_blocks (data, obj_id) < 0) return FALSE; return TRUE; } static gboolean traverse_commit (SeafCommit *commit, void *vdata, gboolean *stop) { VerifyData *data = vdata; SeafRepo *repo = data->repo; int ret; if (data->visited_commits != NULL) { if (g_hash_table_lookup (data->visited_commits, commit->commit_id)) { // Has traversed on prev head commit, stop traverse from this branch *stop = TRUE; return TRUE; } } if (data->truncate_time == 0) { *stop = TRUE; /* Stop after traversing the head commit. */ } else if (data->truncate_time > 0 && (gint64)(commit->ctime) < data->truncate_time && data->traversed_head) { /* Still traverse the first commit older than truncate_time. * If a file in the child commit of this commit is deleted, * we need to access this commit in order to restore it * from trash. */ *stop = TRUE; } if (!data->traversed_head) data->traversed_head = TRUE; ret = seaf_fs_manager_traverse_tree (seaf->fs_mgr, repo->store_id, repo->version, commit->root_id, fs_callback, vdata, FALSE); if (ret < 0) return FALSE; int dummy; g_hash_table_replace (data->visited_commits, g_strdup (commit->commit_id), &dummy); return TRUE; } static int verify_virtual_repos (VerifyData *data) { SeafRepo *repo = data->repo; if (repo->is_virtual) { return 0; } data->traverse_base_commit = TRUE; GList *vrepo_ids = NULL, *ptr; char *repo_id; SeafVirtRepo *vinfo; int ret = 0; vrepo_ids = seaf_repo_manager_get_virtual_repo_ids_by_origin (seaf->repo_mgr, repo->id); for (ptr = vrepo_ids; ptr; ptr = ptr->next) { repo_id = ptr->data; vinfo = seaf_repo_manager_get_virtual_repo_info (seaf->repo_mgr, repo_id); if (!vinfo) { continue; } gboolean res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr, repo->store_id, repo->version, vinfo->base_commit, traverse_commit, data, FALSE); seaf_virtual_repo_info_free (vinfo); if (!res) { seaf_warning ("Failed to traverse base commit %s for virtual repo %s.\n", vinfo->base_commit, repo_id); ret = -1; goto out; } } data->traverse_base_commit = 
FALSE; out: string_list_free (vrepo_ids); return ret; } static gboolean collect_exist_blocks (const char *store_id, int version, const char *block_id, void *vdata) { GHashTable *exist_blocks = vdata; char *copy = g_strdup (block_id); g_hash_table_replace (exist_blocks, copy, copy); return TRUE; } static int verify_repo (SeafRepo *repo) { GList *branches, *ptr; SeafBranch *branch; int ret = 0; VerifyData data = {0}; data.visited = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); data.visited_commits = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); data.repo = repo; data.truncate_time = seaf_repo_manager_get_repo_truncate_time (repo->manager, repo->id); data.exist_blocks = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); ret = seaf_block_manager_foreach_block (seaf->block_mgr, repo->store_id, repo->version, collect_exist_blocks, data.exist_blocks); if (ret < 0) { seaf_warning ("Failed to collect existing blocks for repo %.8s, stop GC.\n\n", repo->id); g_hash_table_destroy (data.exist_blocks); return ret; } branches = seaf_branch_manager_get_branch_list (seaf->branch_mgr, repo->id); if (branches == NULL) { seaf_warning ("[GC] Failed to get branch list of repo %s.\n", repo->id); g_hash_table_destroy (data.exist_blocks); return -1; } for (ptr = branches; ptr != NULL; ptr = ptr->next) { branch = ptr->data; gboolean res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr, repo->id, repo->version, branch->commit_id, traverse_commit, &data, FALSE); seaf_branch_unref (branch); if (!res) { ret = -1; break; } } g_list_free (branches); if (ret < 0) { g_hash_table_destroy (data.visited); g_hash_table_destroy (data.visited_commits); g_hash_table_destroy (data.exist_blocks); return ret; } ret = verify_virtual_repos (&data); g_hash_table_destroy (data.visited); g_hash_table_destroy (data.visited_commits); g_hash_table_destroy (data.exist_blocks); return ret; } int verify_repos (GList *repo_id_list) { if (repo_id_list == NULL) 
repo_id_list = seaf_repo_manager_get_repo_id_list (seaf->repo_mgr); GList *ptr; SeafRepo *repo; int ret = 0; for (ptr = repo_id_list; ptr != NULL; ptr = ptr->next) { repo = seaf_repo_manager_get_repo_ex (seaf->repo_mgr, (const gchar *)ptr->data); g_free (ptr->data); if (!repo) continue; seaf_message ("Start to verify repo %s\n", repo->id); if (repo->is_corrupted) { seaf_warning ("Repo %s is corrupted.\n", repo->id); } else { ret = verify_repo (repo); if (ret < 0) { seaf_warning ("Failed to verify repo %s\n", repo->id); seaf_repo_unref (repo); continue; } seaf_message ("Verify repo %s success\n", repo->id); seaf_repo_unref (repo); } } g_list_free (repo_id_list); return ret; } ================================================ FILE: server/gc/verify.h ================================================ #ifndef GC_VERIFY_H #define GC_VERIFY_H int verify_repos (GList *repo_id_list); #endif ================================================ FILE: server/http-server.c ================================================ #include "common.h" #ifdef HAVE_EVHTP #include #include #include #include #include #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) #include #else #include #endif #include #include #include "mq-mgr.h" #include "utils.h" #include "log.h" #include "http-server.h" #include "seafile-session.h" #include "diff-simple.h" #include "merge-new.h" #include "seaf-db.h" #include "seaf-utils.h" #include "access-file.h" #include "upload-file.h" #include "fileserver-config.h" #include "http-status-codes.h" #define DEFAULT_BIND_HOST "0.0.0.0" #define DEFAULT_BIND_PORT 8082 #define DEFAULT_WORKER_THREADS 10 #define DEFAULT_MAX_DOWNLOAD_DIR_SIZE 100 * ((gint64)1 << 20) /* 100MB */ #define DEFAULT_MAX_INDEXING_THREADS 1 #define DEFAULT_MAX_INDEX_PROCESSING_THREADS 3 #define DEFAULT_FIXED_BLOCK_SIZE ((gint64)1 << 23) /* 8MB */ #define DEFAULT_CLUSTER_SHARED_TEMP_FILE_MODE 0600 #define HOST "host" #define PORT "port" #define HTTP_TEMP_FILE_SCAN_INTERVAL 3600 /*1h*/ 
#define HTTP_TEMP_FILE_DEFAULT_TTL 3600 * 24 * 3 /*3days*/ #define HTTP_TEMP_FILE_TTL "http_temp_file_ttl" #define HTTP_SCAN_INTERVAL "http_temp_scan_interval" #define INIT_INFO "If you see this page, Seafile HTTP syncing component works." #define PROTO_VERSION "{\"version\": 2}" #define CLEANING_INTERVAL_SEC 300 /* 5 minutes */ #define TOKEN_EXPIRE_TIME 7200 /* 2 hours */ #define PERM_EXPIRE_TIME 7200 /* 2 hours */ #define VIRINFO_EXPIRE_TIME 7200 /* 2 hours */ #define FS_ID_LIST_MAX_WORKERS 3 #define FS_ID_LIST_TOKEN_LEN 36 struct _HttpServer { evbase_t *evbase; evhtp_t *evhtp; event_t *reap_timer; pthread_t thread_id; GHashTable *token_cache; pthread_mutex_t token_cache_lock; /* token -> username */ GHashTable *perm_cache; pthread_mutex_t perm_cache_lock; /* repo_id:username -> permission */ GHashTable *vir_repo_info_cache; pthread_mutex_t vir_repo_info_cache_lock; GThreadPool *compute_fs_obj_id_pool; GHashTable *fs_obj_ids; pthread_mutex_t fs_obj_ids_lock; }; typedef struct _HttpServer HttpServer; struct _StatsEventData { char *etype; char *user; char *operation; char repo_id[37]; guint64 bytes; }; typedef struct _StatsEventData StatsEventData; typedef struct TokenInfo { char *repo_id; char *email; gint64 expire_time; } TokenInfo; // PermInfo caches the results from the last permission check for accessing a repo. // They're cached in a hash table having "repo_Id:username:op" as key. // The cached result is updated on the next call to get_check_permission_cb function, or when the cache expires. // The result is only cached if the permission check passed. 
typedef struct PermInfo { gint64 expire_time; } PermInfo; typedef struct VirRepoInfo { char *store_id; gint64 expire_time; } VirRepoInfo; typedef struct FsHdr { char obj_id[40]; guint32 obj_size; } __attribute__((__packed__)) FsHdr; typedef enum CheckExistType { CHECK_FS_EXIST, CHECK_BLOCK_EXIST } CheckExistType; const char *GET_PROTO_PATH = "/protocol-version"; const char *OP_PERM_CHECK_REGEX = "^/repo/[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}/permission-check/.*"; const char *GET_CHECK_QUOTA_REGEX = "^/repo/[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}/quota-check/.*"; const char *HEAD_COMMIT_OPER_REGEX = "^/repo/[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}/commit/HEAD"; const char *GET_HEAD_COMMITS_MULTI_REGEX = "^/repo/head-commits-multi"; const char *COMMIT_OPER_REGEX = "^/repo/[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}/commit/[\\da-z]{40}"; const char *PUT_COMMIT_INFO_REGEX = "^/repo/[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}/commit/[\\da-z]{40}"; const char *GET_FS_OBJ_ID_REGEX = "^/repo/[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}/fs-id-list/.*"; const char *START_FS_OBJ_ID_REGEX = "^/repo/[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}/start-fs-id-list/.*"; const char *QUERY_FS_OBJ_ID_REGEX = "^/repo/[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}/query-fs-id-list/.*"; const char *RETRIEVE_FS_OBJ_ID_REGEX = "^/repo/[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}/retrieve-fs-id-list/.*"; const char *BLOCK_OPER_REGEX = "^/repo/[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}/block/[\\da-z]{40}"; const char *POST_CHECK_FS_REGEX = "^/repo/[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}/check-fs"; const char *POST_CHECK_BLOCK_REGEX = "^/repo/[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}/check-blocks"; const char *POST_RECV_FS_REGEX = 
"^/repo/[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}/recv-fs"; const char *POST_PACK_FS_REGEX = "^/repo/[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}/pack-fs"; const char *GET_BLOCK_MAP_REGEX = "^/repo/[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}/block-map/[\\da-z]{40}"; const char *GET_JWT_TOKEN_REGEX = "^/repo/[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}/jwt-token"; //accessible repos const char *GET_ACCESSIBLE_REPO_LIST_REGEX = "/accessible-repos"; static void load_http_config (HttpServerStruct *htp_server, SeafileSession *session) { GError *error = NULL; char *host = NULL; int port = 0; int worker_threads; char *encoding; char *cluster_shared_temp_file_mode = NULL; gboolean verify_client_blocks; host = fileserver_config_get_string (session->config, HOST, &error); if (!error) { htp_server->bind_addr = host; } else { if (error->code != G_KEY_FILE_ERROR_KEY_NOT_FOUND && error->code != G_KEY_FILE_ERROR_GROUP_NOT_FOUND) { seaf_warning ("[conf] Error: failed to read the value of 'host'\n"); exit (1); } htp_server->bind_addr = g_strdup (DEFAULT_BIND_HOST); g_clear_error (&error); } port = fileserver_config_get_integer (session->config, PORT, &error); if (!error) { htp_server->bind_port = port; } else { if (error->code != G_KEY_FILE_ERROR_KEY_NOT_FOUND && error->code != G_KEY_FILE_ERROR_GROUP_NOT_FOUND) { seaf_warning ("[conf] Error: failed to read the value of 'port'\n"); exit (1); } htp_server->bind_port = DEFAULT_BIND_PORT; g_clear_error (&error); } worker_threads = fileserver_config_get_integer (session->config, "worker_threads", &error); if (error) { htp_server->worker_threads = DEFAULT_WORKER_THREADS; g_clear_error (&error); } else { if (worker_threads <= 0) htp_server->worker_threads = DEFAULT_WORKER_THREADS; else htp_server->worker_threads = worker_threads; } seaf_message ("fileserver: worker_threads = %d\n", htp_server->worker_threads); verify_client_blocks = fileserver_config_get_boolean 
(session->config, "verify_client_blocks_after_sync", &error); if (error) { htp_server->verify_client_blocks = TRUE; g_clear_error(&error); } else { htp_server->verify_client_blocks = verify_client_blocks; } seaf_message ("fileserver: verify_client_blocks = %d\n", htp_server->verify_client_blocks); cluster_shared_temp_file_mode = fileserver_config_get_string (session->config, "cluster_shared_temp_file_mode", &error); if (error) { htp_server->cluster_shared_temp_file_mode = DEFAULT_CLUSTER_SHARED_TEMP_FILE_MODE; g_clear_error (&error); } else { if (!cluster_shared_temp_file_mode) { htp_server->cluster_shared_temp_file_mode = DEFAULT_CLUSTER_SHARED_TEMP_FILE_MODE; } else { htp_server->cluster_shared_temp_file_mode = strtol(cluster_shared_temp_file_mode, NULL, 8); if (htp_server->cluster_shared_temp_file_mode < 0001 || htp_server->cluster_shared_temp_file_mode > 0777) htp_server->cluster_shared_temp_file_mode = DEFAULT_CLUSTER_SHARED_TEMP_FILE_MODE; g_free (cluster_shared_temp_file_mode); } } seaf_message ("fileserver: cluster_shared_temp_file_mode = %o\n", htp_server->cluster_shared_temp_file_mode); encoding = g_key_file_get_string (session->config, "zip", "windows_encoding", &error); if (encoding) { htp_server->windows_encoding = encoding; } else { g_clear_error (&error); /* No windows specific encoding is specified. Set the ZIP_UTF8 flag. 
*/ setlocale (LC_ALL, "en_US.UTF-8"); } } static int validate_token (HttpServer *htp_server, evhtp_request_t *req, const char *repo_id, char **username, gboolean skip_cache) { char *email = NULL; TokenInfo *token_info; char *tmp_token = NULL; const char *token = evhtp_kv_find (req->headers_in, "Seafile-Repo-Token"); if (token == NULL) { const char *auth_token = evhtp_kv_find (req->headers_in, "Authorization"); tmp_token = seaf_parse_auth_token (auth_token); if (tmp_token == NULL) { evhtp_send_reply (req, EVHTP_RES_BADREQ); return EVHTP_RES_BADREQ; } token = tmp_token; } if (!skip_cache) { pthread_mutex_lock (&htp_server->token_cache_lock); token_info = g_hash_table_lookup (htp_server->token_cache, token); if (token_info) { if (strcmp (token_info->repo_id, repo_id) != 0) { pthread_mutex_unlock (&htp_server->token_cache_lock); g_free (tmp_token); return EVHTP_RES_FORBIDDEN; } if (username) *username = g_strdup(token_info->email); pthread_mutex_unlock (&htp_server->token_cache_lock); g_free (tmp_token); return EVHTP_RES_OK; } pthread_mutex_unlock (&htp_server->token_cache_lock); } email = seaf_repo_manager_get_email_by_token (seaf->repo_mgr, repo_id, token); if (email == NULL) { pthread_mutex_lock (&htp_server->token_cache_lock); g_hash_table_remove (htp_server->token_cache, token); pthread_mutex_unlock (&htp_server->token_cache_lock); g_free (tmp_token); return EVHTP_RES_FORBIDDEN; } token_info = g_new0 (TokenInfo, 1); token_info->repo_id = g_strdup (repo_id); token_info->expire_time = (gint64)time(NULL) + TOKEN_EXPIRE_TIME; token_info->email = email; pthread_mutex_lock (&htp_server->token_cache_lock); g_hash_table_insert (htp_server->token_cache, g_strdup (token), token_info); pthread_mutex_unlock (&htp_server->token_cache_lock); if (username) *username = g_strdup(email); g_free (tmp_token); return EVHTP_RES_OK; } static PermInfo * lookup_perm_cache (HttpServer *htp_server, const char *repo_id, const char *username, const char *op) { PermInfo *ret = NULL; PermInfo 
*perm = NULL; char *key = g_strdup_printf ("%s:%s:%s", repo_id, username, op); pthread_mutex_lock (&htp_server->perm_cache_lock); ret = g_hash_table_lookup (htp_server->perm_cache, key); if (ret) { perm = g_new0 (PermInfo, 1); perm->expire_time = ret->expire_time; } pthread_mutex_unlock (&htp_server->perm_cache_lock); g_free (key); return perm; } static char * get_auth_token (evhtp_request_t *req) { const char *token = evhtp_kv_find (req->headers_in, "Seafile-Repo-Token"); if (token) { return g_strdup (token); } char *tmp_token = NULL; const char *auth_token = evhtp_kv_find (req->headers_in, "Authorization"); tmp_token = seaf_parse_auth_token (auth_token); return tmp_token; } static void insert_perm_cache (HttpServer *htp_server, const char *repo_id, const char *username, const char *op, PermInfo *perm) { char *key = g_strdup_printf ("%s:%s:%s", repo_id, username, op); pthread_mutex_lock (&htp_server->perm_cache_lock); g_hash_table_insert (htp_server->perm_cache, key, perm); pthread_mutex_unlock (&htp_server->perm_cache_lock); } static void remove_perm_cache (HttpServer *htp_server, const char *repo_id, const char *username, const char *op) { char *key = g_strdup_printf ("%s:%s:%s", repo_id, username, op); pthread_mutex_lock (&htp_server->perm_cache_lock); g_hash_table_remove (htp_server->perm_cache, key); pthread_mutex_unlock (&htp_server->perm_cache_lock); g_free (key); } static void perm_cache_value_free (gpointer data); static int check_permission (HttpServer *htp_server, const char *repo_id, const char *username, const char *op, gboolean skip_cache) { PermInfo *perm_info = NULL; if (!skip_cache) perm_info = lookup_perm_cache (htp_server, repo_id, username, op); if (perm_info) { perm_cache_value_free (perm_info); return EVHTP_RES_OK; } remove_perm_cache (htp_server, repo_id, username, op); if (strcmp(op, "upload") == 0) { int status = seaf_repo_manager_get_repo_status(seaf->repo_mgr, repo_id); if (status != REPO_STATUS_NORMAL && status != -1) return 
EVHTP_RES_FORBIDDEN; } char *perm = seaf_repo_manager_check_permission (seaf->repo_mgr, repo_id, username, NULL); if (perm) { if ((strcmp (perm, "r") == 0 && strcmp (op, "upload") == 0)) { g_free (perm); return EVHTP_RES_FORBIDDEN; } g_free (perm); perm_info = g_new0 (PermInfo, 1); /* Take the reference of perm. */ perm_info->expire_time = (gint64)time(NULL) + PERM_EXPIRE_TIME; insert_perm_cache (htp_server, repo_id, username, op, perm_info); return EVHTP_RES_OK; } /* Invalidate cache if perm not found in db. */ return EVHTP_RES_FORBIDDEN; } static gboolean get_vir_repo_info (SeafDBRow *row, void *data) { const char *repo_id = seaf_db_row_get_column_text (row, 0); if (!repo_id) return FALSE; const char *origin_id = seaf_db_row_get_column_text (row, 1); if (!origin_id) return FALSE; VirRepoInfo **vinfo = data; *vinfo = g_new0 (VirRepoInfo, 1); if (!*vinfo) return FALSE; (*vinfo)->store_id = g_strdup (origin_id); if (!(*vinfo)->store_id) return FALSE; (*vinfo)->expire_time = time (NULL) + VIRINFO_EXPIRE_TIME; return TRUE; } static char * get_store_id_from_vir_repo_info_cache (HttpServer *htp_server, const char *repo_id) { char *store_id = NULL; VirRepoInfo *vinfo = NULL; pthread_mutex_lock (&htp_server->vir_repo_info_cache_lock); vinfo = g_hash_table_lookup (htp_server->vir_repo_info_cache, repo_id); if (vinfo) { if (vinfo->store_id) store_id = g_strdup (vinfo->store_id); else store_id = g_strdup (repo_id); vinfo->expire_time = time (NULL) + VIRINFO_EXPIRE_TIME; } pthread_mutex_unlock (&htp_server->vir_repo_info_cache_lock); return store_id; } static void add_vir_info_to_cache (HttpServer *htp_server, const char *repo_id, VirRepoInfo *vinfo) { pthread_mutex_lock (&htp_server->vir_repo_info_cache_lock); g_hash_table_insert (htp_server->vir_repo_info_cache, g_strdup (repo_id), vinfo); pthread_mutex_unlock (&htp_server->vir_repo_info_cache_lock); } static char * get_repo_store_id (HttpServer *htp_server, const char *repo_id) { char *store_id = 
get_store_id_from_vir_repo_info_cache (htp_server, repo_id); if (store_id) { return store_id; } VirRepoInfo *vinfo = NULL; char *sql = "SELECT repo_id, origin_repo FROM VirtualRepo where repo_id = ?"; int n_row = seaf_db_statement_foreach_row (seaf->db, sql, get_vir_repo_info, &vinfo, 1, "string", repo_id); if (n_row < 0) { // db error, return NULL return NULL; } else if (n_row == 0) { // repo is not virtual repo vinfo = g_new0 (VirRepoInfo, 1); if (!vinfo) return NULL; vinfo->expire_time = time (NULL) + VIRINFO_EXPIRE_TIME; add_vir_info_to_cache (htp_server, repo_id, vinfo); return g_strdup (repo_id); } else if (!vinfo || !vinfo->store_id) { // out of memory, return NULL return NULL; } add_vir_info_to_cache (htp_server, repo_id, vinfo); return g_strdup (vinfo->store_id); } typedef struct { char *etype; char *user; char *ip; char repo_id[37]; char *path; char *client_name; } RepoEventData; static void free_repo_event_data (RepoEventData *data) { if (!data) return; g_free (data->etype); g_free (data->user); g_free (data->ip); g_free (data->path); g_free (data->client_name); g_free (data); } static void free_stats_event_data (StatsEventData *data) { if (!data) return; g_free (data->etype); g_free (data->user); g_free (data->operation); g_free (data); } static void publish_repo_event (RepoEventData *rdata) { json_t *msg = json_object (); char *msg_str = NULL; json_object_set_new (msg, "msg_type", json_string(rdata->etype)); json_object_set_new (msg, "user_name", json_string(rdata->user)); json_object_set_new (msg, "ip", json_string(rdata->ip)); if (rdata->client_name) { json_object_set_new (msg, "user_agent", json_string(rdata->client_name)); } else { json_object_set_new (msg, "user_agent", json_string("")); } json_object_set_new (msg, "repo_id", json_string(rdata->repo_id)); if (rdata->path) { json_object_set_new (msg, "file_path", json_string(rdata->path)); } else { json_object_set_new (msg, "file_path", json_string("/")); } msg_str = json_dumps (msg, 
JSON_PRESERVE_ORDER); seaf_mq_manager_publish_event (seaf->mq_mgr, SEAFILE_SERVER_CHANNEL_EVENT, msg_str); g_free (msg_str); json_decref (msg); } static void publish_stats_event (StatsEventData *rdata) { json_t *msg = json_object (); char *msg_str = NULL; json_object_set_new (msg, "msg_type", json_string(rdata->etype)); json_object_set_new (msg, "user_name", json_string(rdata->user)); json_object_set_new (msg, "repo_id", json_string(rdata->repo_id)); json_object_set_new (msg, "bytes", json_integer(rdata->bytes)); msg_str = json_dumps (msg, JSON_PRESERVE_ORDER); seaf_mq_manager_publish_event (seaf->mq_mgr, SEAFILE_SERVER_CHANNEL_STATS, msg_str); g_free (msg_str); json_decref (msg); } static void on_repo_oper (HttpServer *htp_server, const char *etype, const char *repo_id, char *user, char *ip, char *client_name) { RepoEventData *rdata = g_new0 (RepoEventData, 1); SeafVirtRepo *vinfo = seaf_repo_manager_get_virtual_repo_info (seaf->repo_mgr, repo_id); if (vinfo) { memcpy (rdata->repo_id, vinfo->origin_repo_id, 36); rdata->path = g_strdup(vinfo->path); } else memcpy (rdata->repo_id, repo_id, 36); rdata->etype = g_strdup (etype); rdata->user = g_strdup (user); rdata->ip = g_strdup (ip); rdata->client_name = g_strdup(client_name); publish_repo_event(rdata); if (vinfo) { g_free (vinfo->path); g_free (vinfo); } free_repo_event_data (rdata); return; } void send_statistic_msg (const char *repo_id, char *user, char *operation, guint64 bytes) { StatsEventData *rdata = g_new0 (StatsEventData, 1); memcpy (rdata->repo_id, repo_id, 36); rdata->etype = g_strdup (operation); rdata->user = g_strdup (user); rdata->bytes = bytes; publish_stats_event(rdata); free_stats_event_data (rdata); return; } char * get_client_ip_addr (void *data) { evhtp_request_t *req = data; const char *xff = evhtp_kv_find (req->headers_in, "X-Forwarded-For"); if (xff) { struct in_addr addr; const char *comma = strchr (xff, ','); char *copy; if (comma) copy = g_strndup(xff, comma-xff); else copy = 
g_strdup(xff); if (evutil_inet_pton (AF_INET, copy, &addr) == 1) return copy; else if (evutil_inet_pton (AF_INET6, copy, &addr) == 1) return copy; g_free (copy); } evhtp_connection_t *conn = req->conn; if (conn->saddr->sa_family == AF_INET) { char ip_addr[17]; const char *ip = NULL; struct sockaddr_in *addr_in = (struct sockaddr_in *)conn->saddr; memset (ip_addr, '\0', 17); ip = evutil_inet_ntop (AF_INET, &addr_in->sin_addr, ip_addr, 16); return g_strdup (ip); } char ip_addr[47]; const char *ip = NULL; struct sockaddr_in6 *addr_in = (struct sockaddr_in6 *)conn->saddr; memset (ip_addr, '\0', 47); ip = evutil_inet_ntop (AF_INET6, &addr_in->sin6_addr, ip_addr, 46); return g_strdup (ip); } static int validate_client_ver (const char *client_ver) { char **versions = NULL; char *next_str = NULL; versions = g_strsplit (client_ver, ".", 3); if (g_strv_length (versions) != 3) { g_strfreev (versions); return EVHTP_RES_BADREQ; } strtoll (versions[0], &next_str, 10); if (versions[0] == next_str) { g_strfreev (versions); return EVHTP_RES_BADREQ; } strtoll (versions[1], &next_str, 10); if (versions[1] == next_str) { g_strfreev (versions); return EVHTP_RES_BADREQ; } strtoll (versions[2], &next_str, 10); if (versions[2] == next_str) { g_strfreev (versions); return EVHTP_RES_BADREQ; } // todo: judge whether version is too old, then return 426 g_strfreev (versions); return EVHTP_RES_OK; } static void get_check_permission_cb (evhtp_request_t *req, void *arg) { const char *op = evhtp_kv_find (req->uri->query, "op"); if (op == NULL || (strcmp (op, "upload") != 0 && strcmp (op, "download") != 0)) { evhtp_send_reply (req, EVHTP_RES_BADREQ); return; } const char *client_id = evhtp_kv_find (req->uri->query, "client_id"); if (client_id && strlen(client_id) != 40) { evhtp_send_reply (req, EVHTP_RES_BADREQ); return; } const char *client_ver = evhtp_kv_find (req->uri->query, "client_ver"); if (client_ver) { int status = validate_client_ver (client_ver); if (status != EVHTP_RES_OK) { 
evhtp_send_reply (req, status); return; } } char *client_name = NULL; const char *client_name_in = evhtp_kv_find (req->uri->query, "client_name"); if (client_name_in) client_name = g_uri_unescape_string (client_name_in, NULL); char **parts = g_strsplit (req->uri->path->full + 1, "/", 0); char *repo_id = parts[1]; HttpServer *htp_server = seaf->http_server->priv; char *username = NULL; char *ip = NULL; const char *token; SeafRepo *repo = NULL; repo = seaf_repo_manager_get_repo_ex (seaf->repo_mgr, repo_id); if (!repo) { evhtp_send_reply (req, SEAF_HTTP_RES_REPO_DELETED); goto out; } if (repo->is_corrupted || repo->repaired) { evhtp_send_reply (req, SEAF_HTTP_RES_REPO_CORRUPTED); goto out; } int token_status = validate_token (htp_server, req, repo_id, &username, TRUE); if (token_status != EVHTP_RES_OK) { evhtp_send_reply (req, token_status); goto out; } /* We shall actually check the permission from database, don't rely on * the cache here. */ int perm_status = check_permission (htp_server, repo_id, username, op, TRUE); if (perm_status == EVHTP_RES_FORBIDDEN) { evhtp_send_reply (req, EVHTP_RES_FORBIDDEN); goto out; } ip = get_client_ip_addr (req); if (!ip) { evhtp_send_reply (req, EVHTP_RES_SERVERR); token = evhtp_kv_find (req->headers_in, "Seafile-Repo-Token"); seaf_warning ("[%s] Failed to get client ip.\n", token); goto out; } if (strcmp (op, "download") == 0) { on_repo_oper (htp_server, "repo-download-sync", repo_id, username, ip, client_name); } /* else if (strcmp (op, "upload") == 0) { */ /* on_repo_oper (htp_server, "repo-upload-sync", repo_id, username, ip, client_name); */ /* } */ if (client_id && client_name) { token = evhtp_kv_find (req->headers_in, "Seafile-Repo-Token"); /* Record the (token, email, ) information, may * include peer_id, peer_ip, peer_name, etc. 
*/ if (!seaf_repo_manager_token_peer_info_exists (seaf->repo_mgr, token)) seaf_repo_manager_add_token_peer_info (seaf->repo_mgr, token, client_id, ip, client_name, (gint64)time(NULL), client_ver); else seaf_repo_manager_update_token_peer_info (seaf->repo_mgr, token, ip, (gint64)time(NULL), client_ver); } evhtp_send_reply (req, EVHTP_RES_OK); out: g_free (username); g_strfreev (parts); g_free (ip); g_free (client_name); if (repo) { seaf_repo_unref (repo); } } static void get_protocol_cb (evhtp_request_t *req, void *arg) { evbuffer_add (req->buffer_out, PROTO_VERSION, strlen (PROTO_VERSION)); evhtp_send_reply (req, EVHTP_RES_OK); } static void get_check_quota_cb (evhtp_request_t *req, void *arg) { HttpServer *htp_server = seaf->http_server->priv; char **parts = g_strsplit (req->uri->path->full + 1, "/", 0); char *repo_id = parts[1]; int token_status = validate_token (htp_server, req, repo_id, NULL, FALSE); if (token_status != EVHTP_RES_OK) { evhtp_send_reply (req, token_status); goto out; } const char *delta = evhtp_kv_find (req->uri->query, "delta"); if (delta == NULL) { char *error = "Invalid delta parameter.\n"; seaf_warning ("%s", error); evbuffer_add (req->buffer_out, error, strlen (error)); evhtp_send_reply (req, EVHTP_RES_BADREQ); goto out; } char *next_ptr = NULL; gint64 delta_num = strtoll(delta, &next_ptr, 10); if (!(*delta != '\0' && *next_ptr == '\0')) { char *error = "Invalid delta parameter.\n"; seaf_warning ("%s", error); evbuffer_add (req->buffer_out, error, strlen (error)); evhtp_send_reply (req, EVHTP_RES_BADREQ); goto out; } int ret = seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr, repo_id, delta_num); if (ret < 0) { evhtp_send_reply (req, EVHTP_RES_SERVERR); } else if (ret == 0) { evhtp_send_reply (req, EVHTP_RES_OK); } else { evhtp_send_reply (req, SEAF_HTTP_RES_NOQUOTA); } out: g_strfreev (parts); } static gboolean get_branch (SeafDBRow *row, void *vid) { char *ret = vid; const char *commit_id; commit_id = seaf_db_row_get_column_text 
(row, 0); memcpy (ret, commit_id, 41); return FALSE; } static void get_head_commit_cb (evhtp_request_t *req, void *arg) { HttpServer *htp_server = seaf->http_server->priv; char **parts = g_strsplit (req->uri->path->full + 1, "/", 0); char *repo_id = parts[1]; gboolean db_err = FALSE, exists = TRUE; int token_status; char commit_id[41]; char *sql; sql = "SELECT 1 FROM Repo WHERE repo_id=?"; exists = seaf_db_statement_exists (seaf->db, sql, &db_err, 1, "string", repo_id); if (!exists) { if (db_err) { seaf_warning ("DB error when check repo existence.\n"); evbuffer_add_printf (req->buffer_out, "{\"is_corrupted\": 1}"); evhtp_send_reply (req, EVHTP_RES_OK); goto out; } evhtp_send_reply (req, SEAF_HTTP_RES_REPO_DELETED); goto out; } token_status = validate_token (htp_server, req, repo_id, NULL, FALSE); if (token_status != EVHTP_RES_OK) { evhtp_send_reply (req, token_status); goto out; } commit_id[0] = 0; sql = "SELECT commit_id FROM Branch WHERE name='master' AND repo_id=?"; if (seaf_db_statement_foreach_row (seaf->db, sql, get_branch, commit_id, 1, "string", repo_id) < 0) { seaf_warning ("DB error when get branch master.\n"); evbuffer_add_printf (req->buffer_out, "{\"is_corrupted\": 1}"); evhtp_send_reply (req, EVHTP_RES_OK); goto out; } if (commit_id[0] == 0) { evhtp_send_reply (req, SEAF_HTTP_RES_REPO_DELETED); goto out; } evbuffer_add_printf (req->buffer_out, "{\"is_corrupted\": 0, \"head_commit_id\": \"%s\"}", commit_id); evhtp_send_reply (req, EVHTP_RES_OK); out: g_strfreev (parts); } static char * gen_merge_description (SeafRepo *repo, const char *merged_root, const char *p1_root, const char *p2_root) { GList *p; GList *results = NULL; char *desc; diff_merge_roots (repo->store_id, repo->version, merged_root, p1_root, p2_root, &results, TRUE); desc = diff_results_to_description (results); for (p = results; p; p = p->next) { DiffEntry *de = p->data; diff_entry_free (de); } g_list_free (results); return desc; } static int fast_forward_or_merge (const char *repo_id, 
SeafCommit *base, SeafCommit *new_commit, const char *token, gboolean *is_gc_conflict) { #define MAX_RETRY_COUNT 3 SeafRepo *repo = NULL; SeafCommit *current_head = NULL, *merged_commit = NULL; int retry_cnt = 0; int ret = 0; char *last_gc_id = NULL; gboolean check_gc; gboolean gc_conflict = FALSE; repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { seaf_warning ("Repo %s doesn't exist.\n", repo_id); ret = -1; goto out; } /* In some uploads, no blocks need to be uploaded. For example, deleting * a file or folder. In such cases, checkbl won't be called. * So the last gc id is not inserted to the database. We don't need to * check gc for these cases since no new blocks are uploaded. * * Note that having a 'NULL' gc id in database is not the same as not having * a last gc id record. The former one indicates that, before block upload, * no GC has been performed; the latter one indicates no _new_ blocks are * being referenced by this new commit. */ if (seaf_db_type(seaf->db) == SEAF_DB_TYPE_SQLITE) check_gc = FALSE; else check_gc = seaf_repo_has_last_gc_id (repo, token); if (check_gc) { last_gc_id = seaf_repo_get_last_gc_id (repo, token); seaf_repo_remove_last_gc_id (repo, token); } retry: current_head = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id, repo->version, repo->head->commit_id); if (!current_head) { seaf_warning ("Failed to find head commit of %s.\n", repo_id); ret = -1; goto out; } /* Merge if base and head are not the same. 
*/ if (strcmp (base->commit_id, current_head->commit_id) != 0) { MergeOptions opt; const char *roots[3]; char *desc = NULL; memset (&opt, 0, sizeof(opt)); opt.n_ways = 3; memcpy (opt.remote_repo_id, repo_id, 36); memcpy (opt.remote_head, new_commit->commit_id, 40); opt.do_merge = TRUE; roots[0] = base->root_id; /* base */ roots[1] = current_head->root_id; /* head */ roots[2] = new_commit->root_id; /* remote */ if (seaf_merge_trees (repo->store_id, repo->version, 3, roots, &opt) < 0) { seaf_warning ("Failed to merge.\n"); ret = -1; goto out; } if (!opt.conflict) desc = g_strdup("Auto merge by system"); else { desc = gen_merge_description (repo, opt.merged_tree_root, current_head->root_id, new_commit->root_id); if (!desc) desc = g_strdup("Auto merge by system"); } merged_commit = seaf_commit_new(NULL, repo->id, opt.merged_tree_root, new_commit->creator_name, EMPTY_SHA1, desc, 0); g_free (desc); merged_commit->parent_id = g_strdup (current_head->commit_id); merged_commit->second_parent_id = g_strdup (new_commit->commit_id); merged_commit->new_merge = TRUE; if (opt.conflict) merged_commit->conflict = TRUE; seaf_repo_to_commit (repo, merged_commit); if (seaf_commit_manager_add_commit (seaf->commit_mgr, merged_commit) < 0) { seaf_warning ("Failed to add commit.\n"); ret = -1; goto out; } } else { seaf_commit_ref (new_commit); merged_commit = new_commit; } seaf_branch_set_commit(repo->head, merged_commit->commit_id); gc_conflict = FALSE; if (seaf_branch_manager_test_and_update_branch(seaf->branch_mgr, repo->head, current_head->commit_id, check_gc, last_gc_id, repo->store_id, &gc_conflict) < 0) { if (gc_conflict) { if (is_gc_conflict) { *is_gc_conflict = TRUE; } seaf_warning ("Head branch update for repo %s conflicts with GC.\n", repo_id); ret = -1; goto out; } seaf_repo_unref (repo); repo = NULL; seaf_commit_unref (current_head); current_head = NULL; seaf_commit_unref (merged_commit); merged_commit = NULL; if (++retry_cnt <= MAX_RETRY_COUNT) { /* Sleep random time between 
100 and 1000 millisecs. */ usleep (g_random_int_range(1, 11) * 100 * 1000); repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { seaf_warning ("Repo %s doesn't exist.\n", repo_id); ret = -1; goto out; } goto retry; } else { ret = -1; goto out; } } out: g_free (last_gc_id); seaf_commit_unref (current_head); seaf_commit_unref (merged_commit); seaf_repo_unref (repo); return ret; } typedef struct CheckBlockAux { GList *file_list; const char *store_id; int version; } CheckBlockAux; static int check_file_blocks (int n, const char *basedir, SeafDirent *files[], void *data) { Seafile *file = NULL; char *block_id; int i = 0; SeafDirent *file1 = files[0]; SeafDirent *file2 = files[1]; CheckBlockAux *aux = (CheckBlockAux*)data; if (!file2 || strcmp (file2->id, EMPTY_SHA1) == 0 || (file1 && strcmp (file1->id, file2->id) == 0)) { return 0; } file = seaf_fs_manager_get_seafile (seaf->fs_mgr, aux->store_id, aux->version, file2->id); if (!file) { return -1; } for (i = 0; i < file->n_blocks; ++i) { block_id = file->blk_sha1s[i]; if (!seaf_block_manager_block_exists (seaf->block_mgr, aux->store_id, aux->version, block_id)) { aux->file_list = g_list_prepend (aux->file_list, g_strdup (file2->name)); goto out; } } out: seafile_unref (file); return 0; } static int check_dir_cb (int n, const char *basedir, SeafDirent *dirs[], void *data, gboolean *recurse) { SeafDirent *dir1 = dirs[0]; SeafDirent *dir2 = dirs[1]; if (!dir1) { // if dir2 is empty, stop diff. if (g_strcmp0 (dir2->id, EMPTY_SHA1) == 0) { *recurse = FALSE; } else { *recurse = TRUE; } return 0; } // if dir2 is not exist, stop diff. if (!dir2) { *recurse = FALSE; return 0; } // if dir1 and dir2 are the same or dir2 is empty, stop diff. 
if (g_strcmp0 (dir1->id, dir2->id) == 0 || g_strcmp0 (dir2->id, EMPTY_SHA1) == 0) { *recurse = FALSE; return 0; } return 0; } static int check_blocks (SeafRepo *repo, SeafCommit *base, SeafCommit *remote, char **ret_body) { DiffOptions opts; memset (&opts, 0, sizeof(opts)); memcpy (opts.store_id, repo->store_id, 36); opts.version = repo->version; opts.file_cb = check_file_blocks; opts.dir_cb = check_dir_cb; CheckBlockAux aux; memset (&aux, 0, sizeof(aux)); aux.store_id = repo->store_id; aux.version = repo->version; opts.data = &aux; const char *trees[2]; trees[0] = base->root_id; trees[1] = remote->root_id; if (diff_trees (2, trees, &opts) < 0) { seaf_warning ("Failed to diff base and remote head for repo %.8s.\n", repo->id); return -1; } if (!aux.file_list) { return 0; } json_t *obj_array = json_array (); GList *ptr; for (ptr = aux.file_list; ptr; ptr = ptr->next) { json_array_append_new (obj_array, json_string (ptr->data)); g_free (ptr->data); } g_list_free (aux.file_list); *ret_body = json_dumps (obj_array, JSON_COMPACT); json_decref (obj_array); return -1; } gboolean should_ignore (const char *filename) { char **components = g_strsplit (filename, "/", -1); int n_comps = g_strv_length (components); int j = 0; char *file_name; for (; j < n_comps; ++j) { file_name = components[j]; if (g_strcmp0(file_name, "..") == 0) { g_strfreev (components); return TRUE; } } g_strfreev (components); return FALSE; } static gboolean include_invalid_path (SeafCommit *base_commit, SeafCommit *new_commit) { GList *diff_entries = NULL; gboolean ret = FALSE; int rc = diff_commits (base_commit, new_commit, &diff_entries, TRUE); if (rc < 0) { seaf_warning ("Failed to check invalid path.\n"); return FALSE; } GList *ptr; DiffEntry *diff_entry; for (ptr = diff_entries; ptr; ptr = ptr->next) { diff_entry = ptr->data; if (diff_entry->new_name) { if (should_ignore(diff_entry->new_name)) { ret = TRUE; break; } } else { if (should_ignore(diff_entry->name)) { ret = TRUE; break; } } } return ret; 
} static void put_update_branch_cb (evhtp_request_t *req, void *arg) { HttpServer *htp_server = seaf->http_server->priv; char **parts; char *repo_id; char *username = NULL; SeafRepo *repo = NULL; SeafCommit *new_commit = NULL, *base = NULL; char *token = NULL; const char *new_commit_id = evhtp_kv_find (req->uri->query, "head"); if (new_commit_id == NULL || !is_object_id_valid (new_commit_id)) { evhtp_send_reply (req, EVHTP_RES_BADREQ); return; } parts = g_strsplit (req->uri->path->full + 1, "/", 0); repo_id = parts[1]; int token_status = validate_token (htp_server, req, repo_id, &username, FALSE); if (token_status != EVHTP_RES_OK) { evhtp_send_reply (req, token_status); goto out; } int perm_status = check_permission (htp_server, repo_id, username, "upload", FALSE); if (perm_status == EVHTP_RES_FORBIDDEN) { evhtp_send_reply (req, EVHTP_RES_FORBIDDEN); goto out; } repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { seaf_warning ("Repo %s is missing or corrupted.\n", repo_id); evhtp_send_reply (req, EVHTP_RES_SERVERR); goto out; } /* Since this is the last step of upload procedure, commit should exist. 
*/ new_commit = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id, repo->version, new_commit_id); if (!new_commit) { evhtp_send_reply (req, EVHTP_RES_SERVERR); goto out; } base = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id, repo->version, new_commit->parent_id); if (!base) { evhtp_send_reply (req, EVHTP_RES_BADREQ); goto out; } if (include_invalid_path (base, new_commit)) { evhtp_send_reply (req, EVHTP_RES_BADREQ); goto out; } if (seaf_quota_manager_check_quota (seaf->quota_mgr, repo_id) < 0) { evhtp_send_reply (req, SEAF_HTTP_RES_NOQUOTA); goto out; } token = get_auth_token (req); if (seaf->http_server->verify_client_blocks) { char *ret_body = NULL; int rc = check_blocks(repo, base, new_commit, &ret_body); if (rc < 0) { if (ret_body) { evbuffer_add (req->buffer_out, ret_body, strlen (ret_body)); } evhtp_send_reply (req, SEAF_HTTP_RES_BLOCK_MISSING); g_free (ret_body); goto out; } } gboolean gc_conflict = FALSE; if (fast_forward_or_merge (repo_id, base, new_commit, token, &gc_conflict) < 0) { if (gc_conflict) { char *msg = "GC Conflict.\n"; evbuffer_add (req->buffer_out, msg, strlen (msg)); evhtp_send_reply (req, EVHTP_RES_CONFLICT); } else { seaf_warning ("Fast forward merge for repo %s is failed.\n", repo_id); evhtp_send_reply (req, EVHTP_RES_SERVERR); } goto out; } seaf_repo_manager_merge_virtual_repo (seaf->repo_mgr, repo_id, NULL); schedule_repo_size_computation (seaf->size_sched, repo_id); evhtp_send_reply (req, EVHTP_RES_OK); out: g_free (token); seaf_repo_unref (repo); seaf_commit_unref (new_commit); seaf_commit_unref (base); g_free (username); g_strfreev (parts); } static void head_commit_oper_cb (evhtp_request_t *req, void *arg) { htp_method req_method = evhtp_request_get_method (req); if (req_method == htp_method_GET) { get_head_commit_cb (req, arg); } else if (req_method == htp_method_PUT) { put_update_branch_cb (req, arg); } } static gboolean collect_head_commit_ids (SeafDBRow *row, void *data) { json_t *map = (json_t *)data; 
const char *repo_id = seaf_db_row_get_column_text (row, 0); const char *commit_id = seaf_db_row_get_column_text (row, 1); json_object_set_new (map, repo_id, json_string(commit_id)); return TRUE; } static void head_commits_multi_cb (evhtp_request_t *req, void *arg) { size_t list_len; json_t *repo_id_array = NULL; size_t n, i; GString *id_list_str = NULL; char *sql = NULL; json_t *commit_id_map = NULL; char *data = NULL; list_len = evbuffer_get_length (req->buffer_in); if (list_len == 0) { evhtp_send_reply (req, EVHTP_RES_BADREQ); goto out; } char *repo_id_list_con = g_new0 (char, list_len); if (!repo_id_list_con) { evhtp_send_reply (req, EVHTP_RES_SERVERR); seaf_warning ("Failed to allocate %lu bytes memory.\n", list_len); goto out; } json_error_t jerror; evbuffer_remove (req->buffer_in, repo_id_list_con, list_len); repo_id_array = json_loadb (repo_id_list_con, list_len, 0, &jerror); g_free (repo_id_list_con); if (!repo_id_array) { seaf_warning ("load repo_id_list to json failed, error: %s\n", jerror.text); evhtp_send_reply (req, EVHTP_RES_BADREQ); goto out; } n = json_array_size (repo_id_array); if (n == 0) { evhtp_send_reply (req, EVHTP_RES_BADREQ); goto out; } json_t *id; id_list_str = g_string_new (""); for (i = 0; i < n; ++i) { id = json_array_get (repo_id_array, i); if (json_typeof(id) != JSON_STRING) { evhtp_send_reply (req, EVHTP_RES_BADREQ); goto out; } /* Make sure ids are in UUID format. 
*/ if (!is_uuid_valid (json_string_value (id))) { evhtp_send_reply (req, EVHTP_RES_BADREQ); goto out; } if (i == 0) g_string_append_printf (id_list_str, "'%s'", json_string_value(id)); else g_string_append_printf (id_list_str, ",'%s'", json_string_value(id)); } if (seaf_db_type (seaf->db) == SEAF_DB_TYPE_MYSQL) sql = g_strdup_printf ("SELECT repo_id, commit_id FROM Branch WHERE name='master' AND repo_id IN (%s) LOCK IN SHARE MODE", id_list_str->str); else sql = g_strdup_printf ("SELECT repo_id, commit_id FROM Branch WHERE name='master' AND repo_id IN (%s)", id_list_str->str); commit_id_map = json_object(); if (seaf_db_statement_foreach_row (seaf->db, sql, collect_head_commit_ids, commit_id_map, 0) < 0) { evhtp_send_reply (req, EVHTP_RES_SERVERR); goto out; } data = json_dumps (commit_id_map, JSON_COMPACT); if (!data) { seaf_warning ("failed to dump json.\n"); evhtp_send_reply (req, EVHTP_RES_SERVERR); goto out; } evbuffer_add (req->buffer_out, data, strlen(data)); evhtp_send_reply (req, EVHTP_RES_OK); out: if (repo_id_array) json_decref (repo_id_array); if (id_list_str) g_string_free (id_list_str, TRUE); g_free (sql); if (commit_id_map) json_decref (commit_id_map); if (data) free (data); } static void get_commit_info_cb (evhtp_request_t *req, void *arg) { HttpServer *htp_server = seaf->http_server->priv; char **parts = g_strsplit (req->uri->path->full + 1, "/", 0); char *repo_id = parts[1]; char *commit_id = parts[3]; char *username = NULL; int token_status = validate_token (htp_server, req, repo_id, &username, FALSE); if (token_status != EVHTP_RES_OK) { evhtp_send_reply (req, token_status); goto out; } int perm_status = check_permission (htp_server, repo_id, username, "download", FALSE); if (perm_status != EVHTP_RES_OK) { evhtp_send_reply (req, EVHTP_RES_FORBIDDEN); goto out; } char *data = NULL; int len; int ret = seaf_obj_store_read_obj (seaf->commit_mgr->obj_store, repo_id, 1, commit_id, (void **)&data, &len); if (ret < 0) { seaf_warning ("Get commit info 
failed: commit %s is missing.\n", commit_id); evhtp_send_reply (req, EVHTP_RES_NOTFOUND); goto out; } evbuffer_add (req->buffer_out, data, len); evhtp_send_reply (req, EVHTP_RES_OK); g_free (data); out: g_free (username); g_strfreev (parts); } static int save_last_gc_id (const char *repo_id, const char *token) { SeafRepo *repo; char *gc_id; repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { seaf_warning ("Failed to find repo %s.\n", repo_id); return -1; } gc_id = seaf_repo_get_current_gc_id (repo); seaf_repo_set_last_gc_id (repo, token, gc_id); g_free (gc_id); seaf_repo_unref (repo); return 0; } static void put_commit_cb (evhtp_request_t *req, void *arg) { HttpServer *htp_server = seaf->http_server->priv; char **parts = g_strsplit (req->uri->path->full + 1, "/", 0); char *repo_id = parts[1]; char *commit_id = parts[3]; char *username = NULL; void *data = NULL; int token_status = validate_token (htp_server, req, repo_id, &username, FALSE); if (token_status != EVHTP_RES_OK) { evhtp_send_reply (req, token_status); goto out; } int perm_status = check_permission (htp_server, repo_id, username, "upload", FALSE); if (perm_status == EVHTP_RES_FORBIDDEN) { evhtp_send_reply (req, EVHTP_RES_FORBIDDEN); goto out; } int con_len = evbuffer_get_length (req->buffer_in); if(con_len == 0) { evhtp_send_reply (req, EVHTP_RES_BADREQ); goto out; } data = g_new0 (char, con_len); if (!data) { evhtp_send_reply (req, EVHTP_RES_SERVERR); seaf_warning ("Failed to allocate %d bytes memory.\n", con_len); goto out; } evbuffer_remove (req->buffer_in, data, con_len); SeafCommit *commit = seaf_commit_from_data (commit_id, (char *)data, con_len); if (!commit) { evhtp_send_reply (req, EVHTP_RES_BADREQ); goto out; } if (strcmp (commit->repo_id, repo_id) != 0) { evhtp_send_reply (req, EVHTP_RES_BADREQ); goto out; } if (seaf_commit_manager_add_commit (seaf->commit_mgr, commit) < 0) { evhtp_send_reply (req, EVHTP_RES_SERVERR); } else { /* Last GCID must be set before checking 
blocks. However, in http sync,
 * block list may be sent in multiple http requests. There is no way to
 * tell which one is the first check block request.
 *
 * So we set the last GCID just before replying to upload commit
 * request. One consequence is that even if the following upload
 * doesn't upload new blocks, we still need to check gc conflict in
 * update-branch request. Since gc conflict is a rare case, this solution
 * won't introduce many more gc conflicts.
 */
        /* Record the repo's current GC id under this client's sync token so
         * a concurrent GC run can be detected later at update-branch time. */
        char *token = get_auth_token (req);
        if (save_last_gc_id (repo_id, token) < 0) {
            evhtp_send_reply (req, EVHTP_RES_SERVERR);
        } else
            evhtp_send_reply (req, EVHTP_RES_OK);
        g_free (token);
    }

    seaf_commit_unref (commit);

out:
    g_free (username);
    g_free (data);
    g_strfreev (parts);
}

/* Dispatch commit-object requests: PUT uploads a commit, GET downloads one. */
static void
commit_oper_cb (evhtp_request_t *req, void *arg)
{
    htp_method req_method = evhtp_request_get_method (req);

    if (req_method == htp_method_PUT) {
        put_commit_cb (req, arg);
    } else if (req_method == htp_method_GET) {
        get_commit_info_cb (req, arg);
    }
}

/* Diff callback: collect the id of a file present in the first tree that
 * differs from (or is absent in) the second tree.  Empty-file ids
 * (EMPTY_SHA1) are skipped since there is no object to transfer. */
static int
collect_file_ids (int n, const char *basedir, SeafDirent *files[], void *data)
{
    SeafDirent *file1 = files[0];
    SeafDirent *file2 = files[1];
    GList **pret = data;

    if (file1 && (!file2 || strcmp(file1->id, file2->id) != 0) &&
        strcmp (file1->id, EMPTY_SHA1) != 0)
        *pret = g_list_prepend (*pret, g_strdup(file1->id));

    return 0;
}

/* Diff callback used in dir-only mode: ignore file entries entirely. */
static int
collect_file_ids_nop (int n, const char *basedir, SeafDirent *files[], void *data)
{
    return 0;
}

/* Diff callback: collect the id of a new or changed directory object. */
static int
collect_dir_ids (int n, const char *basedir, SeafDirent *dirs[], void *data,
                 gboolean *recurse)
{
    SeafDirent *dir1 = dirs[0];
    SeafDirent *dir2 = dirs[1];
    GList **pret = data;

    if (dir1 && (!dir2 || strcmp(dir1->id, dir2->id) != 0) &&
        strcmp (dir1->id, EMPTY_SHA1) != 0)
        *pret = g_list_prepend (*pret, g_strdup(dir1->id));

    return 0;
}

/* Compute the list of fs object ids the client lacks: objects reachable from
 * @server_head but not from @client_head.  A NULL @client_head means the
 * client has nothing, so its root is treated as the empty tree.  Returns 0
 * with the ids in @results (caller frees), -1 on error. */
static int
calculate_send_object_list (SeafRepo *repo,
                            const char *server_head,
                            const char *client_head,
                            gboolean dir_only,
                            GList **results)
{
    SeafCommit *remote_head = NULL, *master_head = NULL;
    char
*remote_head_root; int ret = 0; *results = NULL; master_head = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id, repo->version, server_head); if (!master_head) { seaf_warning ("Server head commit %s:%s not found.\n", repo->id, server_head); return -1; } if (client_head) { remote_head = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id, repo->version, client_head); if (!remote_head) { ret = -1; goto out; } remote_head_root = remote_head->root_id; } else remote_head_root = EMPTY_SHA1; /* Diff won't traverse the root object itself. */ if (strcmp (remote_head_root, master_head->root_id) != 0 && strcmp (master_head->root_id, EMPTY_SHA1) != 0) *results = g_list_prepend (*results, g_strdup(master_head->root_id)); DiffOptions opts; memset (&opts, 0, sizeof(opts)); memcpy (opts.store_id, repo->store_id, 36); opts.version = repo->version; if (!dir_only) opts.file_cb = collect_file_ids; else opts.file_cb = collect_file_ids_nop; opts.dir_cb = collect_dir_ids; opts.data = results; const char *trees[2]; trees[0] = master_head->root_id; trees[1] = remote_head_root; if (diff_trees (2, trees, &opts) < 0) { seaf_warning ("Failed to diff remote and master head for repo %.8s.\n", repo->id); string_list_free (*results); ret = -1; } out: seaf_commit_unref (remote_head); seaf_commit_unref (master_head); return ret; } static void get_fs_obj_id_cb (evhtp_request_t *req, void *arg) { HttpServer *htp_server = seaf->http_server->priv; char **parts; char *repo_id; SeafRepo *repo = NULL; gboolean dir_only = FALSE; char *username = NULL; const char *server_head = evhtp_kv_find (req->uri->query, "server-head"); if (server_head == NULL || !is_object_id_valid (server_head)) { char *error = "Invalid server-head parameter.\n"; seaf_warning ("%s", error); evbuffer_add (req->buffer_out, error, strlen (error)); evhtp_send_reply (req, EVHTP_RES_BADREQ); return; } const char *client_head = evhtp_kv_find (req->uri->query, "client-head"); if (client_head && !is_object_id_valid 
(client_head)) { char *error = "Invalid client-head parameter.\n"; seaf_warning ("%s", error); evbuffer_add (req->buffer_out, error, strlen (error)); evhtp_send_reply (req, EVHTP_RES_BADREQ); return; } const char *dir_only_arg = evhtp_kv_find (req->uri->query, "dir-only"); if (dir_only_arg) dir_only = TRUE; parts = g_strsplit (req->uri->path->full + 1, "/", 0); repo_id = parts[1]; int token_status = validate_token (htp_server, req, repo_id, &username, FALSE); if (token_status != EVHTP_RES_OK) { evhtp_send_reply (req, token_status); goto out; } int perm_status = check_permission (htp_server, repo_id, username, "download", FALSE); if (perm_status != EVHTP_RES_OK) { evhtp_send_reply (req, EVHTP_RES_FORBIDDEN); goto out; } GList *list = NULL, *ptr; repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { seaf_warning ("Failed to find repo %.8s.\n", repo_id); evhtp_send_reply (req, EVHTP_RES_SERVERR); goto out; } if (calculate_send_object_list (repo, server_head, client_head, dir_only, &list) < 0) { evhtp_send_reply (req, EVHTP_RES_SERVERR); goto out; } json_t *obj_array = json_array (); for (ptr = list; ptr; ptr = ptr->next) { json_array_append_new (obj_array, json_string (ptr->data)); g_free (ptr->data); } g_list_free (list); char *obj_list = json_dumps (obj_array, JSON_COMPACT); evbuffer_add (req->buffer_out, obj_list, strlen (obj_list)); evhtp_send_reply (req, EVHTP_RES_OK); g_free (obj_list); json_decref (obj_array); out: g_free (username); g_strfreev (parts); seaf_repo_unref (repo); } typedef struct ComputeObjTask { HttpServer *htp_server; char *token; char *repo_id; char *client_head; char *server_head; gboolean dir_only; } ComputeObjTask; typedef struct CalObjResult { GList *list; gboolean done; } CalObjResult; static void free_compute_obj_task(ComputeObjTask *task) { if (!task) return; if (task->token) g_free(task->token); if (task->repo_id) g_free(task->repo_id); if (task->client_head) g_free(task->client_head); if (task->server_head) 
g_free(task->server_head); g_free(task); } static void free_obj_cal_result (gpointer data) { CalObjResult *result = (CalObjResult *)data; if (!result) return; if (result->list) g_list_free (result->list); g_free(result); } static void compute_fs_obj_id (gpointer ptask, gpointer ppara) { SeafRepo *repo = NULL; ComputeObjTask *task = ptask; const char *client_head = task->client_head; const char *server_head = task->server_head; char *repo_id = task->repo_id; gboolean dir_only = task->dir_only; HttpServer *htp_server = task->htp_server; CalObjResult *result = NULL; pthread_mutex_lock (&htp_server->fs_obj_ids_lock); result = g_hash_table_lookup (htp_server->fs_obj_ids, task->token); pthread_mutex_unlock (&htp_server->fs_obj_ids_lock); if (!result) { goto out; } repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { seaf_warning ("Failed to find repo %.8s.\n", repo_id); goto out; } if (calculate_send_object_list (repo, server_head, client_head, dir_only, &result->list) < 0) { pthread_mutex_lock (&htp_server->fs_obj_ids_lock); g_hash_table_remove (htp_server->fs_obj_ids, task->token); pthread_mutex_unlock (&htp_server->fs_obj_ids_lock); goto out; } result->done = TRUE; out: seaf_repo_unref (repo); free_compute_obj_task(task); } static void start_fs_obj_id_cb (evhtp_request_t *req, void *arg) { HttpServer *htp_server = seaf->http_server->priv; char **parts; char *repo_id; gboolean dir_only = FALSE; json_t *obj; const char *server_head = evhtp_kv_find (req->uri->query, "server-head"); if (server_head == NULL || !is_object_id_valid (server_head)) { char *error = "Invalid server-head parameter.\n"; evbuffer_add (req->buffer_out, error, strlen (error)); evhtp_send_reply (req, EVHTP_RES_BADREQ); return; } const char *client_head = evhtp_kv_find (req->uri->query, "client-head"); if (client_head && !is_object_id_valid (client_head)) { char *error = "Invalid client-head parameter.\n"; evbuffer_add (req->buffer_out, error, strlen (error)); evhtp_send_reply 
(req, EVHTP_RES_BADREQ); return; } const char *dir_only_arg = evhtp_kv_find (req->uri->query, "dir-only"); if (dir_only_arg) dir_only = TRUE; parts = g_strsplit (req->uri->path->full + 1, "/", 0); repo_id = parts[1]; int token_status = validate_token (htp_server, req, repo_id, NULL, FALSE); if (token_status != EVHTP_RES_OK) { evhtp_send_reply (req, token_status); goto out; } char uuid[37]; char *new_token; gen_uuid_inplace (uuid); new_token = g_strndup(uuid, FS_ID_LIST_TOKEN_LEN); CalObjResult *result = g_new0(CalObjResult, 1); if (!result) { evhtp_send_reply (req, EVHTP_RES_SERVERR); goto out; } result->done = FALSE; ComputeObjTask *task = g_new0 (ComputeObjTask, 1); if (!task) { evhtp_send_reply (req, EVHTP_RES_SERVERR); goto out; } task->token = new_token; task->dir_only = dir_only; task->htp_server = htp_server; task->repo_id = g_strdup(repo_id); task->client_head = g_strdup(client_head); task->server_head = g_strdup(server_head); pthread_mutex_lock (&htp_server->fs_obj_ids_lock); g_hash_table_insert (htp_server->fs_obj_ids, g_strdup(task->token), result); pthread_mutex_unlock (&htp_server->fs_obj_ids_lock); g_thread_pool_push (htp_server->compute_fs_obj_id_pool, task, NULL); obj = json_object (); json_object_set_new (obj, "token", json_string (new_token)); char *json_str = json_dumps (obj, JSON_COMPACT); evbuffer_add (req->buffer_out, json_str, strlen(json_str)); evhtp_send_reply (req, EVHTP_RES_OK); g_free (json_str); json_decref (obj); out: g_strfreev (parts); } static void query_fs_obj_id_cb (evhtp_request_t *req, void *arg) { json_t *obj; const char *token = NULL; CalObjResult *result = NULL; char **parts; char *repo_id = NULL; HttpServer *htp_server = seaf->http_server->priv; parts = g_strsplit (req->uri->path->full + 1, "/", 0); repo_id = parts[1]; int token_status = validate_token (htp_server, req, repo_id, NULL, FALSE); if (token_status != EVHTP_RES_OK) { evhtp_send_reply (req, token_status); goto out; } token = evhtp_kv_find (req->uri->query, 
"token"); if (!token || strlen(token)!=FS_ID_LIST_TOKEN_LEN) { evhtp_send_reply (req, EVHTP_RES_BADREQ); goto out; } obj = json_object (); pthread_mutex_lock (&htp_server->fs_obj_ids_lock); result = g_hash_table_lookup (htp_server->fs_obj_ids, token); if (!result) { pthread_mutex_unlock (&htp_server->fs_obj_ids_lock); evhtp_send_reply (req, EVHTP_RES_NOTFOUND); goto out; } else { if (!result->done) { json_object_set_new (obj, "success", json_false()); } else { json_object_set_new (obj, "success", json_true()); } } pthread_mutex_unlock (&htp_server->fs_obj_ids_lock); json_object_set_new (obj, "token", json_string (token)); char *json_str = json_dumps (obj, JSON_COMPACT); evbuffer_add (req->buffer_out, json_str, strlen(json_str)); evhtp_send_reply (req, EVHTP_RES_OK); g_free (json_str); out: if (obj) json_decref (obj); g_strfreev (parts); return; } static void retrieve_fs_obj_id_cb (evhtp_request_t *req, void *arg) { char **parts; const char *token = NULL; char *repo_id = NULL; GList *list = NULL; CalObjResult *result = NULL; HttpServer *htp_server = seaf->http_server->priv; parts = g_strsplit (req->uri->path->full + 1, "/", 0); repo_id = parts[1]; int token_status = validate_token (htp_server, req, repo_id, NULL, FALSE); if (token_status != EVHTP_RES_OK) { evhtp_send_reply (req, token_status); goto out; } token = evhtp_kv_find (req->uri->query, "token"); if (!token || strlen(token)!=FS_ID_LIST_TOKEN_LEN) { evhtp_send_reply (req, EVHTP_RES_BADREQ); goto out; } pthread_mutex_lock (&htp_server->fs_obj_ids_lock); result = g_hash_table_lookup (htp_server->fs_obj_ids, token); if (!result) { pthread_mutex_unlock (&htp_server->fs_obj_ids_lock); evhtp_send_reply (req, EVHTP_RES_NOTFOUND); return; } if (!result->done) { pthread_mutex_unlock (&htp_server->fs_obj_ids_lock); char *error = "The cauculation task is not completed.\n"; evbuffer_add (req->buffer_out, error, strlen(error)); evhtp_send_reply (req, EVHTP_RES_BADREQ); return; } list = result->list; pthread_mutex_unlock 
(&htp_server->fs_obj_ids_lock); GList *ptr; json_t *obj_array = json_array (); for (ptr = list; ptr; ptr = ptr->next) { json_array_append_new (obj_array, json_string (ptr->data)); g_free (ptr->data); } pthread_mutex_lock (&htp_server->fs_obj_ids_lock); g_hash_table_remove (htp_server->fs_obj_ids, token); pthread_mutex_unlock (&htp_server->fs_obj_ids_lock); char *obj_list = json_dumps (obj_array, JSON_COMPACT); evbuffer_add (req->buffer_out, obj_list, strlen (obj_list)); evhtp_send_reply (req, EVHTP_RES_OK); g_free (obj_list); json_decref (obj_array); out: g_strfreev (parts); return; } static void get_block_cb (evhtp_request_t *req, void *arg) { const char *repo_id = NULL; char *block_id = NULL; char *store_id = NULL; HttpServer *htp_server = seaf->http_server->priv; BlockMetadata *blk_meta = NULL; char *username = NULL; char **parts = g_strsplit (req->uri->path->full + 1, "/", 0); repo_id = parts[1]; block_id = parts[3]; int token_status = validate_token (htp_server, req, repo_id, &username, FALSE); if (token_status != EVHTP_RES_OK) { evhtp_send_reply (req, token_status); goto out; } int perm_status = check_permission (htp_server, repo_id, username, "download", FALSE); if (perm_status != EVHTP_RES_OK) { evhtp_send_reply (req, EVHTP_RES_FORBIDDEN); goto out; } store_id = get_repo_store_id (htp_server, repo_id); if (!store_id) { evhtp_send_reply (req, EVHTP_RES_SERVERR); goto out; } blk_meta = seaf_block_manager_stat_block (seaf->block_mgr, store_id, 1, block_id); if (blk_meta == NULL || blk_meta->size <= 0) { evhtp_send_reply (req, EVHTP_RES_SERVERR); goto out; } BlockHandle *blk_handle = NULL; blk_handle = seaf_block_manager_open_block(seaf->block_mgr, store_id, 1, block_id, BLOCK_READ); if (!blk_handle) { seaf_warning ("Failed to open block %.8s:%s.\n", store_id, block_id); evhtp_send_reply (req, EVHTP_RES_SERVERR); goto out; } void *block_con = g_new0 (char, blk_meta->size); if (!block_con) { evhtp_send_reply (req, EVHTP_RES_SERVERR); seaf_warning ("Failed to 
allocate %d bytes memeory.\n", blk_meta->size); goto free_handle; } int rsize = seaf_block_manager_read_block (seaf->block_mgr, blk_handle, block_con, blk_meta->size); if (rsize != blk_meta->size) { seaf_warning ("Failed to read block %.8s:%s.\n", store_id, block_id); evhtp_send_reply (req, EVHTP_RES_SERVERR); } else { evbuffer_add (req->buffer_out, block_con, blk_meta->size); evhtp_send_reply (req, EVHTP_RES_OK); } g_free (block_con); send_statistic_msg (store_id, username, "sync-file-download", (guint64)rsize); free_handle: seaf_block_manager_close_block (seaf->block_mgr, blk_handle); seaf_block_manager_block_handle_free (seaf->block_mgr, blk_handle); out: g_free (username); g_free (blk_meta); g_free (store_id); g_strfreev (parts); } static void put_send_block_cb (evhtp_request_t *req, void *arg) { const char *repo_id = NULL; char *block_id = NULL; char *store_id = NULL; char *username = NULL; HttpServer *htp_server = seaf->http_server->priv; char **parts = NULL; void *blk_con = NULL; parts = g_strsplit (req->uri->path->full + 1, "/", 0); repo_id = parts[1]; block_id = parts[3]; int token_status = validate_token (htp_server, req, repo_id, &username, FALSE); if (token_status != EVHTP_RES_OK) { evhtp_send_reply (req, token_status); goto out; } int perm_status = check_permission (htp_server, repo_id, username, "upload", FALSE); if (perm_status == EVHTP_RES_FORBIDDEN) { evhtp_send_reply (req, EVHTP_RES_FORBIDDEN); goto out; } store_id = get_repo_store_id (htp_server, repo_id); if (!store_id) { evhtp_send_reply (req, EVHTP_RES_SERVERR); goto out; } int blk_len = evbuffer_get_length (req->buffer_in); if (blk_len == 0) { evhtp_send_reply (req, EVHTP_RES_BADREQ); goto out; } blk_con = g_new0 (char, blk_len); if (!blk_con) { evhtp_send_reply (req, EVHTP_RES_SERVERR); seaf_warning ("Failed to allocate %d bytes memory.\n", blk_len); goto out; } evbuffer_remove (req->buffer_in, blk_con, blk_len); BlockHandle *blk_handle = NULL; blk_handle = seaf_block_manager_open_block 
(seaf->block_mgr, store_id, 1, block_id, BLOCK_WRITE); if (blk_handle == NULL) { seaf_warning ("Failed to open block %.8s:%s.\n", store_id, block_id); evhtp_send_reply (req, EVHTP_RES_SERVERR); goto out; } if (seaf_block_manager_write_block (seaf->block_mgr, blk_handle, blk_con, blk_len) != blk_len) { seaf_warning ("Failed to write block %.8s:%s.\n", store_id, block_id); evhtp_send_reply (req, EVHTP_RES_SERVERR); seaf_block_manager_close_block (seaf->block_mgr, blk_handle); seaf_block_manager_block_handle_free (seaf->block_mgr, blk_handle); goto out; } if (seaf_block_manager_close_block (seaf->block_mgr, blk_handle) < 0) { seaf_warning ("Failed to close block %.8s:%s.\n", store_id, block_id); evhtp_send_reply (req, EVHTP_RES_SERVERR); seaf_block_manager_block_handle_free (seaf->block_mgr, blk_handle); goto out; } if (seaf_block_manager_commit_block (seaf->block_mgr, blk_handle) < 0) { seaf_warning ("Failed to commit block %.8s:%s.\n", store_id, block_id); evhtp_send_reply (req, EVHTP_RES_SERVERR); seaf_block_manager_block_handle_free (seaf->block_mgr, blk_handle); goto out; } seaf_block_manager_block_handle_free (seaf->block_mgr, blk_handle); evhtp_send_reply (req, EVHTP_RES_OK); send_statistic_msg (store_id, username, "sync-file-upload", (guint64)blk_len); out: g_free (username); g_free (store_id); g_strfreev (parts); g_free (blk_con); } static void block_oper_cb (evhtp_request_t *req, void *arg) { htp_method req_method = evhtp_request_get_method (req); if (req_method == htp_method_GET) { get_block_cb (req, arg); } else if (req_method == htp_method_PUT) { put_send_block_cb (req, arg); } } static void post_check_exist_cb (evhtp_request_t *req, void *arg, CheckExistType type) { HttpServer *htp_server = seaf->http_server->priv; char **parts = g_strsplit (req->uri->path->full + 1, "/", 0); char *repo_id = parts[1]; char *store_id = NULL; char *username = NULL; int token_status = validate_token (htp_server, req, repo_id, &username, FALSE); if (token_status != 
EVHTP_RES_OK) { evhtp_send_reply (req, token_status); goto out; } int perm_status = check_permission (htp_server, repo_id, username, "download", FALSE); if (perm_status != EVHTP_RES_OK) { evhtp_send_reply (req, EVHTP_RES_FORBIDDEN); goto out; } store_id = get_repo_store_id (htp_server, repo_id); if (!store_id) { evhtp_send_reply (req, EVHTP_RES_SERVERR); goto out; } size_t list_len = evbuffer_get_length (req->buffer_in); if (list_len == 0) { evhtp_send_reply (req, EVHTP_RES_BADREQ); goto out; } char *obj_list_con = g_new0 (char, list_len); if (!obj_list_con) { evhtp_send_reply (req, EVHTP_RES_SERVERR); seaf_warning ("Failed to allocate %zu bytes memory.\n", list_len); goto out; } json_error_t jerror; evbuffer_remove (req->buffer_in, obj_list_con, list_len); json_t *obj_array = json_loadb (obj_list_con, list_len, 0, &jerror); g_free (obj_list_con); if (!obj_array) { seaf_warning ("dump obj_id to json failed, error: %s\n", jerror.text); evhtp_send_reply (req, EVHTP_RES_BADREQ); return; } json_t *obj = NULL; gboolean ret = TRUE; const char *obj_id = NULL; int index = 0; int array_size = json_array_size (obj_array); json_t *needed_objs = json_array(); for (; index < array_size; ++index) { obj = json_array_get (obj_array, index); obj_id = json_string_value (obj); if (!is_object_id_valid (obj_id)) continue; if (type == CHECK_FS_EXIST) { ret = seaf_fs_manager_object_exists (seaf->fs_mgr, store_id, 1, obj_id); } else if (type == CHECK_BLOCK_EXIST) { ret = seaf_block_manager_block_exists (seaf->block_mgr, store_id, 1, obj_id); } if (!ret) { json_array_append (needed_objs, obj); } } char *ret_array = json_dumps (needed_objs, JSON_COMPACT); evbuffer_add (req->buffer_out, ret_array, strlen (ret_array)); evhtp_send_reply (req, EVHTP_RES_OK); g_free (ret_array); json_decref (needed_objs); json_decref (obj_array); out: g_free (username); g_free (store_id); g_strfreev (parts); } static void post_check_fs_cb (evhtp_request_t *req, void *arg) { post_check_exist_cb (req, arg, 
CHECK_FS_EXIST); } static void post_check_block_cb (evhtp_request_t *req, void *arg) { post_check_exist_cb (req, arg, CHECK_BLOCK_EXIST); } static void post_recv_fs_cb (evhtp_request_t *req, void *arg) { HttpServer *htp_server = seaf->http_server->priv; char **parts = g_strsplit (req->uri->path->full + 1, "/", 0); const char *repo_id = parts[1]; char *store_id = NULL; char *username = NULL; FsHdr *hdr = NULL; int token_status = validate_token (htp_server, req, repo_id, &username, FALSE); if (token_status != EVHTP_RES_OK) { evhtp_send_reply (req, token_status); goto out; } int perm_status = check_permission (htp_server, repo_id, username, "upload", FALSE); if (perm_status != EVHTP_RES_OK) { evhtp_send_reply (req, EVHTP_RES_FORBIDDEN); goto out; } store_id = get_repo_store_id (htp_server, repo_id); if (!store_id) { evhtp_send_reply (req, EVHTP_RES_SERVERR); goto out; } int fs_con_len = evbuffer_get_length (req->buffer_in); if (fs_con_len < sizeof(FsHdr)) { evhtp_send_reply (req, EVHTP_RES_BADREQ); goto out; } hdr = g_new0 (FsHdr, 1); if (!hdr) { evhtp_send_reply (req, EVHTP_RES_SERVERR); goto out; } char obj_id[41]; void *obj_con = NULL; int con_len; while (fs_con_len > 0) { if (fs_con_len < sizeof(FsHdr)) { seaf_warning ("Bad fs object content format from %.8s:%s.\n", repo_id, username); evhtp_send_reply (req, EVHTP_RES_BADREQ); break; } evbuffer_remove (req->buffer_in, hdr, sizeof(FsHdr)); con_len = ntohl (hdr->obj_size); memcpy (obj_id, hdr->obj_id, 40); obj_id[40] = 0; if (!is_object_id_valid (obj_id)) { evhtp_send_reply (req, EVHTP_RES_BADREQ); break; } obj_con = g_new0 (char, con_len); if (!obj_con) { evhtp_send_reply (req, EVHTP_RES_SERVERR); break; } evbuffer_remove (req->buffer_in, obj_con, con_len); if (seaf_obj_store_write_obj (seaf->fs_mgr->obj_store, store_id, 1, obj_id, obj_con, con_len, FALSE) < 0) { seaf_warning ("Failed to write fs object %.8s to disk.\n", obj_id); g_free (obj_con); evhtp_send_reply (req, EVHTP_RES_SERVERR); break; } fs_con_len -= 
(con_len + sizeof(FsHdr)); g_free (obj_con); } if (fs_con_len == 0) { evhtp_send_reply (req, EVHTP_RES_OK); } out: g_free (store_id); g_free (hdr); g_free (username); g_strfreev (parts); } #define MAX_OBJECT_PACK_SIZE (1 << 20) /* 1MB */ static void post_pack_fs_cb (evhtp_request_t *req, void *arg) { HttpServer *htp_server = seaf->http_server->priv; char **parts = g_strsplit (req->uri->path->full + 1, "/", 0); const char *repo_id = parts[1]; char *store_id = NULL; char *username = NULL; int token_status = validate_token (htp_server, req, repo_id, &username, FALSE); if (token_status != EVHTP_RES_OK) { evhtp_send_reply (req, token_status); goto out; } int perm_status = check_permission (htp_server, repo_id, username, "download", FALSE); if (perm_status != EVHTP_RES_OK) { evhtp_send_reply (req, EVHTP_RES_FORBIDDEN); goto out; } store_id = get_repo_store_id (htp_server, repo_id); if (!store_id) { evhtp_send_reply (req, EVHTP_RES_SERVERR); goto out; } int fs_id_list_len = evbuffer_get_length (req->buffer_in); if (fs_id_list_len == 0) { evhtp_send_reply (req, EVHTP_RES_BADREQ); goto out; } char *fs_id_list = g_new0 (char, fs_id_list_len); if (!fs_id_list) { evhtp_send_reply (req, EVHTP_RES_SERVERR); seaf_warning ("Failed to allocate %d bytes memory.\n", fs_id_list_len); goto out; } json_error_t jerror; evbuffer_remove (req->buffer_in, fs_id_list, fs_id_list_len); json_t *fs_id_array = json_loadb (fs_id_list, fs_id_list_len, 0, &jerror); g_free (fs_id_list); if (!fs_id_array) { seaf_warning ("dump fs obj_id from json failed, error: %s\n", jerror.text); evhtp_send_reply (req, EVHTP_RES_BADREQ); goto out; } json_t *obj = NULL; const char *obj_id = NULL; int index = 0; void *fs_data = NULL; int data_len; int data_len_net; int total_size = 0; int array_size = json_array_size (fs_id_array); for (; index < array_size; ++index) { obj = json_array_get (fs_id_array, index); obj_id = json_string_value (obj); if (!is_object_id_valid (obj_id)) { seaf_warning ("Invalid fs id %s.\n", 
obj_id); evhtp_send_reply (req, EVHTP_RES_BADREQ); json_decref (fs_id_array); goto out; } if (seaf_obj_store_read_obj (seaf->fs_mgr->obj_store, store_id, 1, obj_id, &fs_data, &data_len) < 0) { seaf_warning ("Failed to read seafile object %s:%s.\n", store_id, obj_id); evhtp_send_reply (req, EVHTP_RES_SERVERR); json_decref (fs_id_array); goto out; } evbuffer_add (req->buffer_out, obj_id, 40); data_len_net = htonl (data_len); evbuffer_add (req->buffer_out, &data_len_net, 4); evbuffer_add (req->buffer_out, fs_data, data_len); total_size += data_len; g_free (fs_data); if (total_size >= MAX_OBJECT_PACK_SIZE) break; } evhtp_send_reply (req, EVHTP_RES_OK); json_decref (fs_id_array); out: g_free (username); g_free (store_id); g_strfreev (parts); } static void get_block_map_cb (evhtp_request_t *req, void *arg) { const char *repo_id = NULL; char *file_id = NULL; char *store_id = NULL; HttpServer *htp_server = seaf->http_server->priv; Seafile *file = NULL; char *block_id; BlockMetadata *blk_meta = NULL; json_t *array = NULL; char *data = NULL; char *username = NULL; char **parts = g_strsplit (req->uri->path->full + 1, "/", 0); repo_id = parts[1]; file_id = parts[3]; int token_status = validate_token (htp_server, req, repo_id, &username, FALSE); if (token_status != EVHTP_RES_OK) { evhtp_send_reply (req, token_status); goto out; } int perm_status = check_permission (htp_server, repo_id, username, "download", FALSE); if (perm_status != EVHTP_RES_OK) { evhtp_send_reply (req, EVHTP_RES_FORBIDDEN); goto out; } store_id = get_repo_store_id (htp_server, repo_id); if (!store_id) { evhtp_send_reply (req, EVHTP_RES_SERVERR); goto out; } file = seaf_fs_manager_get_seafile (seaf->fs_mgr, store_id, 1, file_id); if (!file) { evhtp_send_reply (req, EVHTP_RES_NOTFOUND); goto out; } array = json_array (); int i; for (i = 0; i < file->n_blocks; ++i) { block_id = file->blk_sha1s[i]; blk_meta = seaf_block_manager_stat_block (seaf->block_mgr, store_id, 1, block_id); if (blk_meta == NULL) { 
/* Tail of get_block_map_cb(): a block's metadata lookup failed, or the
 * size of each block has been appended to the JSON array; dump the array
 * into the response body and release every per-request resource. */
        seaf_warning ("Failed to find block %s/%s\n", store_id, block_id);
        evhtp_send_reply (req, EVHTP_RES_SERVERR);
        g_free (blk_meta);
        goto out;
    }
    json_array_append_new (array, json_integer(blk_meta->size));
    g_free (blk_meta);
}

data = json_dumps (array, JSON_COMPACT);
evbuffer_add (req->buffer_out, data, strlen (data));
evhtp_send_reply (req, EVHTP_RES_OK);

out:
g_free (username);
g_free (store_id);
seafile_unref (file);
if (array)
    json_decref (array);
if (data)
    free (data);    /* json_dumps() allocates with malloc, so plain free(). */
g_strfreev (parts);
}

/*
 * Handler for GET .../repo/<repo-id>/jwt-token.
 *
 * Validates the client's sync token for the repo, then returns a JWT for
 * the notification server as {"jwt_token": "..."}.  Responds 404 when no
 * notification manager is configured.
 */
static void
get_jwt_token_cb (evhtp_request_t *req, void *arg)
{
    const char *repo_id = NULL;
    HttpServer *htp_server = seaf->http_server->priv;
    json_t *obj = NULL;
    char *data = NULL;
    char *username = NULL;
    char *jwt_token = NULL;

    /* URL path is "/<prefix>/<repo-id>/...": parts[1] is the repo id. */
    char **parts = g_strsplit (req->uri->path->full + 1, "/", 0);
    repo_id = parts[1];

    int token_status = validate_token (htp_server, req, repo_id, &username, FALSE);
    if (token_status != EVHTP_RES_OK) {
        evhtp_send_reply (req, token_status);
        goto out;
    }

    /* A JWT is only meaningful when a notification server is configured. */
    if (!seaf->notif_mgr) {
        evhtp_send_reply (req, EVHTP_RES_NOTFOUND);
        goto out;
    }

    jwt_token = seaf_gen_notif_server_jwt (repo_id, username);
    if (!jwt_token) {
        seaf_warning ("Failed to gen jwt token for repo %s\n", repo_id);
        evhtp_send_reply (req, EVHTP_RES_SERVERR);
        goto out;
    }

    obj = json_object ();
    json_object_set_new (obj, "jwt_token", json_string (jwt_token));
    data = json_dumps (obj, JSON_COMPACT);
    evbuffer_add (req->buffer_out, data, strlen (data));
    evhtp_send_reply (req, EVHTP_RES_OK);

out:
    g_free (jwt_token);
    g_free (username);
    if (obj)
        json_decref (obj);
    if (data)
        free (data);
    g_strfreev (parts);
}

/*
 * Build a JSON object describing @srepo for the accessible-repo list.
 *
 * @table is used for de-duplication: the repo id is inserted as both key
 * and value, and repos already present (or repos with a non-NULL
 * "repo_type") are skipped.  Returns NULL when the repo is skipped.
 * Ownership of repo_id transfers to @table on success; all other strings
 * fetched from the GObject are freed here.
 */
static json_t *
fill_obj_from_seafilerepo (SeafileRepo *srepo, GHashTable *table)
{
    int version = 0;
    char *repo_id = NULL;
    char *commit_id = NULL;
    char *repo_name = NULL;
    char *permission = NULL;
    char *owner = NULL;
    char *type = NULL;
    gint64 last_modify = 0;
    json_t *obj = NULL;

    g_object_get (srepo,
                  "version", &version,
                  "id", &repo_id,
                  "head_cmmt_id", &commit_id,
                  "name", &repo_name,
                  "last_modify", &last_modify,
                  "permission", &permission,
                  "user", &owner,
                  "repo_type", &type,
                  NULL);

    if (!repo_id)
        goto out;

    /* Repos with a special type are excluded from the listing. */
    if (type) {
        g_free (repo_id);
        goto out;
    }

    //the repo_id will be free when the table is destroyed.
    if (g_hash_table_lookup (table, repo_id)) {
        g_free (repo_id);
        goto out;
    }
    g_hash_table_insert (table, repo_id, repo_id);

    obj = json_object ();
    json_object_set_new (obj, "version", json_integer (version));
    json_object_set_new (obj, "id", json_string (repo_id));
    json_object_set_new (obj, "head_commit_id", json_string (commit_id));
    json_object_set_new (obj, "name", json_string (repo_name));
    json_object_set_new (obj, "mtime", json_integer (last_modify));
    json_object_set_new (obj, "permission", json_string (permission));
    json_object_set_new (obj, "owner", json_string (owner));

out:
    g_free (commit_id);
    g_free (repo_name);
    g_free (permission);
    g_free (owner);
    g_free (type);
    return obj;
}

/*
 * De-duplicate a list of group repos by repo id, keeping the entry with
 * the strongest permission ("rw" replaces an earlier "r").
 *
 * Returns a hash table mapping strdup'ed repo id -> SeafileRepo*.  The
 * table owns its keys (g_free); the SeafileRepo values keep one reference
 * each and must be unreffed by the consumer.  Repos with a non-NULL
 * "repo_type" are dropped (and unreffed) here.
 */
static GHashTable *
filter_group_repos (GList *repos)
{
    if (!repos)
        return NULL;

    SeafileRepo *srepo = NULL;
    SeafileRepo *srepo_tmp = NULL;
    GList *iter;
    GHashTable *table = NULL;
    char *permission = NULL;
    char *permission_prev = NULL;
    char *repo_id = NULL;
    char *type = NULL;

    table = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);

    for (iter = repos; iter; iter = iter->next) {
        srepo = iter->data;
        g_object_get (srepo,
                      "id", &repo_id,
                      "permission", &permission,
                      "repo_type", &type,
                      NULL);
        if (type) {
            g_free (repo_id);
            g_free (permission);
            g_free (type);
            g_object_unref (srepo);
            continue;
        }
        srepo_tmp = g_hash_table_lookup (table, repo_id);
        if (srepo_tmp) {
            g_object_get (srepo_tmp, "permission", &permission_prev, NULL);
            /* Upgrade an earlier read-only entry to read-write. */
            if (g_strcmp0 (permission, "rw") == 0 &&
                g_strcmp0 (permission_prev, "r") == 0) {
                g_object_unref (srepo_tmp);
                g_hash_table_remove (table, repo_id);
                g_hash_table_insert (table, g_strdup (repo_id), srepo);
            } else {
                g_object_unref (srepo);
            }
            g_free (permission_prev);
        } else {
            g_hash_table_insert (table, g_strdup (repo_id), srepo);
        }
        g_free (repo_id);
        g_free (permission);
        g_free (type);
    }

    return table;
}

/*
 * Append every repo in @group_repos to @repo_array with type "grepo".
 * Each SeafileRepo reference held by the table is released here;
 * @obtained_repos provides cross-category de-duplication.
 */
static void
group_repos_to_json (json_t *repo_array, GHashTable *group_repos,
                     GHashTable *obtained_repos)
{
    GHashTableIter iter;
    gpointer key, value;
    SeafileRepo *srepo = NULL;
    json_t *obj;

    g_hash_table_iter_init (&iter, group_repos);
    while (g_hash_table_iter_next (&iter, &key, &value)) {
        srepo = value;
        obj = fill_obj_from_seafilerepo (srepo, obtained_repos);
        if (!obj) {
            g_object_unref (srepo);
            continue;
        }
        json_object_set_new (obj, "type", json_string ("grepo"));
        json_array_append_new (repo_array, obj);
        g_object_unref (srepo);
    }
}

/*
 * Handler for the accessible-repo-list endpoint.
 *
 * After validating the sync token for the repo id passed in the query
 * string, returns a JSON array of every repo the user can access:
 * personal repos ("repo"), shared repos ("srepo"), group repos and
 * inner-public repos ("grepo"), de-duplicated via @obtained_repos.
 * Continues past this edit: the array is dumped and sent, or 500 is
 * returned on a DB error.
 */
static void
get_accessible_repo_list_cb (evhtp_request_t *req, void *arg)
{
    GList *iter;
    HttpServer *htp_server = seaf->http_server->priv;
    SeafRepo *repo = NULL;
    char *user = NULL;
    GList *repos = NULL;
    int org_id = -1;

    const char *repo_id = evhtp_kv_find (req->uri->query, "repo_id");
    if (!repo_id || !is_uuid_valid (repo_id)) {
        evhtp_send_reply (req, EVHTP_RES_BADREQ);
        seaf_warning ("Invalid repo id.\n");
        return;
    }

    int token_status = validate_token (htp_server, req, repo_id, &user, FALSE);
    if (token_status != EVHTP_RES_OK) {
        evhtp_send_reply (req, token_status);
        return;
    }

    json_t *obj;
    json_t *repo_array = json_array ();
    gboolean db_err = FALSE;
    GHashTable *obtained_repos = NULL;
    char *repo_id_tmp = NULL;

    /* Tracks repo ids already emitted; owns its keys. */
    obtained_repos = g_hash_table_new_full (g_str_hash, g_str_equal,
                                            g_free, NULL);

    //get personal repo list
    repos = seaf_repo_manager_get_repos_by_owner (seaf->repo_mgr, user,
                                                  0, -1, -1, &db_err);
    if (db_err)
        goto out;
    for (iter = repos; iter; iter = iter->next) {
        repo = iter->data;
        if (repo->type) {
            seaf_repo_unref (repo);
            continue;
        }
        if (!repo->is_corrupted) {
            if (!g_hash_table_lookup (obtained_repos, repo->id)) {
                repo_id_tmp = g_strdup (repo->id);
                g_hash_table_insert (obtained_repos, repo_id_tmp, repo_id_tmp);
            }
            obj = json_object ();
            json_object_set_new (obj, "version", json_integer (repo->version));
            json_object_set_new (obj, "id", json_string (repo->id));
            json_object_set_new (obj, "head_commit_id", json_string (repo->head->commit_id));
            json_object_set_new (obj, "name", json_string (repo->name));
            json_object_set_new (obj, "mtime", json_integer (repo->last_modify));
            json_object_set_new (obj, "permission", json_string ("rw"));
            json_object_set_new (obj, "type", json_string ("repo"));
            json_object_set_new (obj, "owner", json_string (user));
            json_array_append_new (repo_array, obj);
        }
        seaf_repo_unref (repo);
    }
    g_list_free (repos);

    GError *error = NULL;
    SeafileRepo *srepo = NULL;

    //get shared repo list
    repos = seaf_share_manager_list_share_repos (seaf->share_mgr, user,
                                                 "to_email", -1, -1, &db_err);
    if (db_err)
        goto out;
    for (iter = repos; iter; iter = iter->next) {
        srepo = iter->data;
        obj = fill_obj_from_seafilerepo (srepo, obtained_repos);
        if (!obj) {
            g_object_unref (srepo);
            continue;
        }
        json_object_set_new (obj, "type", json_string ("srepo"));
        json_array_append_new (repo_array, obj);
        g_object_unref (srepo);
    }
    g_list_free (repos);

    //get group repo list
    GHashTable *group_repos = NULL;
    repos = seaf_get_group_repos_by_user (seaf->repo_mgr, user, org_id, &error);
    if (error) {
        g_clear_error (&error);
        goto out;
    }
    if (repos) {
        group_repos = filter_group_repos (repos);
        group_repos_to_json (repo_array, group_repos, obtained_repos);
        g_hash_table_destroy (group_repos);
        g_list_free (repos);
    }

    //get inner public repo list
    repos = seaf_repo_manager_list_inner_pub_repos (seaf->repo_mgr, &db_err);
    if (db_err)
        goto out;
    for (iter = repos; iter; iter = iter->next) {
        srepo = iter->data;
        obj = fill_obj_from_seafilerepo (srepo, obtained_repos);
        if (!obj) {
            g_object_unref (srepo);
            continue;
        }
        json_object_set_new (obj, "type", json_string ("grepo"));
        json_object_set_new (obj, "owner", json_string ("Organization"));
        json_array_append_new (repo_array, obj);
        g_object_unref (srepo);
    }
    g_list_free (repos);

out:
    g_free (user);
    g_hash_table_destroy (obtained_repos);
    if (db_err) {
        json_decref (repo_array);
        seaf_warning ("DB error when get accessible repo list.\n");
        evhtp_send_reply (req, EVHTP_RES_SERVERR);
return; } char *json_str = json_dumps (repo_array, JSON_COMPACT); evbuffer_add (req->buffer_out, json_str, strlen(json_str)); evhtp_send_reply (req, EVHTP_RES_OK); g_free (json_str); json_decref (repo_array); } static evhtp_res http_request_finish_cb (evhtp_request_t *req, void *arg) { RequestInfo *info = arg; struct timeval end, intv; seaf_metric_manager_in_flight_request_dec (seaf->metric_mgr); if (!info) return EVHTP_RES_OK; g_free (info->url_path); g_free (info); return EVHTP_RES_OK; } static evhtp_res http_request_start_cb (evhtp_request_t *req, evhtp_headers_t *hdr, void *arg) { RequestInfo *info = NULL; info = g_new0 (RequestInfo, 1); info->url_path = g_strdup (req->uri->path->full); gettimeofday (&info->start, NULL); seaf_metric_manager_in_flight_request_inc (seaf->metric_mgr); evhtp_set_hook (&req->hooks, evhtp_hook_on_request_fini, http_request_finish_cb, info); req->cbarg = info; return EVHTP_RES_OK; } static void http_request_init (HttpServerStruct *server) { HttpServer *priv = server->priv; evhtp_callback_t *cb; cb = evhtp_set_cb (priv->evhtp, GET_PROTO_PATH, get_protocol_cb, NULL); evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL); cb = evhtp_set_regex_cb (priv->evhtp, GET_CHECK_QUOTA_REGEX, get_check_quota_cb, NULL); evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL); cb = evhtp_set_regex_cb (priv->evhtp, OP_PERM_CHECK_REGEX, get_check_permission_cb, NULL); evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL); cb = evhtp_set_regex_cb (priv->evhtp, HEAD_COMMIT_OPER_REGEX, head_commit_oper_cb, NULL); evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL); cb = evhtp_set_regex_cb (priv->evhtp, GET_HEAD_COMMITS_MULTI_REGEX, head_commits_multi_cb, NULL); evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL); cb = evhtp_set_regex_cb (priv->evhtp, COMMIT_OPER_REGEX, commit_oper_cb, NULL); evhtp_set_hook(&cb->hooks, 
evhtp_hook_on_headers, http_request_start_cb, NULL); cb = evhtp_set_regex_cb (priv->evhtp, GET_FS_OBJ_ID_REGEX, get_fs_obj_id_cb, NULL); evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL); // evhtp_set_regex_cb (priv->evhtp, // START_FS_OBJ_ID_REGEX, start_fs_obj_id_cb, // priv); // evhtp_set_regex_cb (priv->evhtp, // QUERY_FS_OBJ_ID_REGEX, query_fs_obj_id_cb, // priv); // evhtp_set_regex_cb (priv->evhtp, // RETRIEVE_FS_OBJ_ID_REGEX, retrieve_fs_obj_id_cb, // priv); cb = evhtp_set_regex_cb (priv->evhtp, BLOCK_OPER_REGEX, block_oper_cb, NULL); evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL); cb = evhtp_set_regex_cb (priv->evhtp, POST_CHECK_FS_REGEX, post_check_fs_cb, NULL); evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL); cb = evhtp_set_regex_cb (priv->evhtp, POST_CHECK_BLOCK_REGEX, post_check_block_cb, NULL); evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL); cb = evhtp_set_regex_cb (priv->evhtp, POST_RECV_FS_REGEX, post_recv_fs_cb, NULL); evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL); cb = evhtp_set_regex_cb (priv->evhtp, POST_PACK_FS_REGEX, post_pack_fs_cb, NULL); evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL); cb = evhtp_set_regex_cb (priv->evhtp, GET_BLOCK_MAP_REGEX, get_block_map_cb, NULL); evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL); cb = evhtp_set_regex_cb (priv->evhtp, GET_JWT_TOKEN_REGEX, get_jwt_token_cb, NULL); evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL); cb = evhtp_set_regex_cb (priv->evhtp, GET_ACCESSIBLE_REPO_LIST_REGEX, get_accessible_repo_list_cb, NULL); evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL); /* Web access file */ access_file_init (priv->evhtp); /* Web upload file */ if (upload_file_init (priv->evhtp, server->http_temp_dir) < 0) exit(-1); } static void 
token_cache_value_free (gpointer data) { TokenInfo *token_info = (TokenInfo *)data; if (token_info != NULL) { g_free (token_info->repo_id); g_free (token_info->email); g_free (token_info); } } static gboolean is_token_expire (gpointer key, gpointer value, gpointer arg) { TokenInfo *token_info = (TokenInfo *)value; if(token_info && token_info->expire_time <= (gint64)time(NULL)) { return TRUE; } return FALSE; } static void perm_cache_value_free (gpointer data) { PermInfo *perm_info = data; g_free (perm_info); } static gboolean is_perm_expire (gpointer key, gpointer value, gpointer arg) { PermInfo *perm_info = (PermInfo *)value; if(perm_info && perm_info->expire_time <= (gint64)time(NULL)) { return TRUE; } return FALSE; } static gboolean is_vir_repo_info_expire (gpointer key, gpointer value, gpointer arg) { VirRepoInfo *vinfo = (VirRepoInfo *)value; if(vinfo && vinfo->expire_time <= (gint64)time(NULL)) { return TRUE; } return FALSE; } static void free_vir_repo_info (gpointer data) { if (!data) return; VirRepoInfo *vinfo = data; if (vinfo->store_id) g_free (vinfo->store_id); g_free (vinfo); } static void remove_expire_cache_cb (evutil_socket_t sock, short type, void *data) { HttpServer *htp_server = data; pthread_mutex_lock (&htp_server->token_cache_lock); g_hash_table_foreach_remove (htp_server->token_cache, is_token_expire, NULL); pthread_mutex_unlock (&htp_server->token_cache_lock); pthread_mutex_lock (&htp_server->perm_cache_lock); g_hash_table_foreach_remove (htp_server->perm_cache, is_perm_expire, NULL); pthread_mutex_unlock (&htp_server->perm_cache_lock); pthread_mutex_lock (&htp_server->vir_repo_info_cache_lock); g_hash_table_foreach_remove (htp_server->vir_repo_info_cache, is_vir_repo_info_expire, NULL); pthread_mutex_unlock (&htp_server->vir_repo_info_cache_lock); } static void * http_server_run (void *arg) { HttpServerStruct *server = arg; HttpServer *priv = server->priv; priv->evbase = event_base_new(); priv->evhtp = evhtp_new(priv->evbase, NULL); if 
(evhtp_bind_socket(priv->evhtp, server->bind_addr, server->bind_port, 128) < 0) { seaf_warning ("Could not bind socket: %s\n", strerror (errno)); exit(-1); } http_request_init (server); evhtp_use_threads (priv->evhtp, NULL, server->worker_threads, NULL); struct timeval tv; tv.tv_sec = CLEANING_INTERVAL_SEC; tv.tv_usec = 0; priv->reap_timer = event_new (priv->evbase, -1, EV_PERSIST, remove_expire_cache_cb, priv); evtimer_add (priv->reap_timer, &tv); event_base_loop (priv->evbase, 0); return NULL; } HttpServerStruct * seaf_http_server_new (struct _SeafileSession *session) { HttpServerStruct *server = g_new0 (HttpServerStruct, 1); HttpServer *priv = g_new0 (HttpServer, 1); priv->evbase = NULL; priv->evhtp = NULL; load_http_config (server, session); priv->token_cache = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, token_cache_value_free); pthread_mutex_init (&priv->token_cache_lock, NULL); priv->perm_cache = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, perm_cache_value_free); pthread_mutex_init (&priv->perm_cache_lock, NULL); priv->vir_repo_info_cache = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, free_vir_repo_info); pthread_mutex_init (&priv->vir_repo_info_cache_lock, NULL); server->http_temp_dir = g_build_filename (session->seaf_dir, "httptemp", NULL); // priv->compute_fs_obj_id_pool = g_thread_pool_new (compute_fs_obj_id, NULL, // FS_ID_LIST_MAX_WORKERS, FALSE, NULL); // priv->fs_obj_ids = g_hash_table_new_full (g_str_hash, g_str_equal, // g_free, free_obj_cal_result); // pthread_mutex_init (&priv->fs_obj_ids_lock, NULL); server->seaf_session = session; server->priv = priv; return server; } gint64 get_last_modify_time (const char *path) { struct stat st; if (stat (path, &st) < 0) { return -1; } return st.st_mtime; } static gint64 check_httptemp_dir_recursive (const char *parent_dir, gint64 expired_time) { char *full_path; const char *dname; gint64 cur_time; gint64 last_modify = -1; GDir *dir = NULL; gint64 file_num = 0; dir = 
g_dir_open (parent_dir, 0, NULL); while ((dname = g_dir_read_name(dir)) != NULL) { full_path = g_build_path ("/", parent_dir, dname, NULL); if (g_file_test (full_path, G_FILE_TEST_IS_DIR)) { file_num += check_httptemp_dir_recursive (full_path, expired_time); } else { cur_time = time (NULL); last_modify = get_last_modify_time (full_path); if (last_modify == -1) { g_free (full_path); continue; } /*remove blokc cache from local*/ if (last_modify + expired_time <= cur_time) { g_unlink (full_path); file_num ++; } } g_free (full_path); } g_dir_close (dir); return file_num; } static int scan_httptemp_dir (const char *httptemp_dir, gint64 expired_time) { return check_httptemp_dir_recursive (httptemp_dir, expired_time); } static void * cleanup_expired_httptemp_file (void *arg) { GError *error = NULL; HttpServerStruct *server = arg; SeafileSession *session = server->seaf_session; gint64 ttl = 0; gint64 scan_interval = 0; gint64 file_num = 0; ttl = fileserver_config_get_int64 (session->config, HTTP_TEMP_FILE_TTL, &error); if (error) { ttl = HTTP_TEMP_FILE_DEFAULT_TTL; g_clear_error (&error); } scan_interval = fileserver_config_get_int64 (session->config, HTTP_SCAN_INTERVAL, &error); if (error) { scan_interval = HTTP_TEMP_FILE_SCAN_INTERVAL; g_clear_error (&error); } while (TRUE) { sleep (scan_interval); file_num = scan_httptemp_dir (server->http_temp_dir, ttl); if (file_num) { seaf_message ("Clean up %ld http temp files\n", file_num); file_num = 0; } } return NULL; } int seaf_http_server_start (HttpServerStruct *server) { int ret = pthread_create (&server->priv->thread_id, NULL, http_server_run, server); if (ret != 0) return -1; pthread_detach (server->priv->thread_id); pthread_t tid; ret = pthread_create (&tid, NULL, cleanup_expired_httptemp_file, server); if (ret != 0) return -1; pthread_detach (tid); return 0; } int seaf_http_server_invalidate_tokens (HttpServerStruct *htp_server, const GList *tokens) { const GList *p; pthread_mutex_lock 
(&htp_server->priv->token_cache_lock);

    for (p = tokens; p; p = p->next) {
        const char *token = (char *)p->data;
        g_hash_table_remove (htp_server->priv->token_cache, token);
    }

    pthread_mutex_unlock (&htp_server->priv->token_cache_lock);

    return 0;
}

#endif


================================================
FILE: server/http-server.h
================================================
#ifndef HTTP_SERVER_H
#define HTTP_SERVER_H

#ifdef HAVE_EVHTP

/* NOTE(review): the extraction stripped angle-bracket header names from
 * bare "#include" lines throughout this dump; the originals presumably
 * named system headers — confirm against the repository. */
#include
#include "metric-mgr.h"

struct _SeafileSession;
struct _HttpServer;

/* Public handle for the embedded HTTP (sync protocol) server. */
struct _HttpServerStruct {
    struct _SeafileSession *seaf_session;

    struct _HttpServer *priv;

    char *bind_addr;
    int bind_port;
    char *http_temp_dir;        /* temp dir for file upload */
    char *windows_encoding;
    int worker_threads;
    int cluster_shared_temp_file_mode;
    gboolean verify_client_blocks;
};

/* Per-request bookkeeping attached by the metrics hooks. */
typedef struct RequestInfo {
    struct timeval start;
    char *url_path;
} RequestInfo;

typedef struct _HttpServerStruct HttpServerStruct;

HttpServerStruct *
seaf_http_server_new (struct _SeafileSession *session);

int
seaf_http_server_start (HttpServerStruct *htp_server);

int
seaf_http_server_invalidate_tokens (HttpServerStruct *htp_server,
                                    const GList *tokens);

void
send_statistic_msg (const char *repo_id, char *user, char *operation, guint64 bytes);

char *
get_client_ip_addr (void *data);

#endif

#endif


================================================
FILE: server/http-status-codes.h
================================================
#ifndef HTTP_STATUS_CODES_H
#define HTTP_STATUS_CODES_H

/* Seafile specific http status codes.
 */
#define SEAF_HTTP_RES_FORBIDDEN 403
#define SEAF_HTTP_RES_BADFILENAME 440
#define SEAF_HTTP_RES_EXISTS 441
/* NOTE(review): NOT_EXISTS shares code 441 with EXISTS — looks
 * suspicious; confirm this is intentional upstream. */
#define SEAF_HTTP_RES_NOT_EXISTS 441
#define SEAF_HTTP_RES_TOOLARGE 442
#define SEAF_HTTP_RES_NOQUOTA 443
#define SEAF_HTTP_RES_REPO_DELETED 444
#define SEAF_HTTP_RES_REPO_CORRUPTED 445
#define SEAF_HTTP_RES_BLOCK_MISSING 446

#endif


================================================
FILE: server/http-tx-mgr.c
================================================
#include "common.h"

#include
#include
#include
#include
#include

#include "seafile-session.h"
#include "http-tx-mgr.h"

#include "utils.h"
#include "seaf-db.h"
#include "seafile-error.h"

#define DEBUG_FLAG SEAFILE_DEBUG_TRANSFER
#include "log.h"

/* Http connection and connection pool. */

struct _Connection {
    CURL *curl;
    gint64 ctime;       /* Used to clean up unused connection. */
    gboolean release;   /* If TRUE, the connection will be released. */
};

struct _ConnectionPool {
    GQueue *queue;
    pthread_mutex_t lock;
};

/* Allocate a Connection with a fresh curl easy handle. */
static Connection *
connection_new ()
{
    Connection *conn = g_new0 (Connection, 1);
    if (!conn)
        return NULL;

    conn->curl = curl_easy_init();
    conn->ctime = (gint64)time(NULL);

    return conn;
}

/* Destroy a Connection and its curl handle. */
static void
connection_free (Connection *conn)
{
    if (!conn)
        return;
    curl_easy_cleanup (conn->curl);
    g_free (conn);
}

ConnectionPool *
connection_pool_new ()
{
    ConnectionPool *pool = g_new0 (ConnectionPool, 1);
    if (!pool)
        return NULL;

    pool->queue = g_queue_new ();
    pthread_mutex_init (&pool->lock, NULL);

    return pool;
}

/* Free the pool itself.
 * NOTE(review): connections still queued are not freed here — verify the
 * pool is only destroyed when empty, otherwise this leaks curl handles. */
void
connection_pool_free (ConnectionPool *pool)
{
    if (!pool)
        return;
    g_queue_free (pool->queue);
    g_free (pool);
}

/* Pop a pooled connection, or create a new one when the pool is empty. */
Connection *
connection_pool_get_connection (ConnectionPool *pool)
{
    Connection *conn = NULL;

    pthread_mutex_lock (&pool->lock);
    conn = g_queue_pop_head (pool->queue);
    if (!conn) {
        conn = connection_new ();
    }
    pthread_mutex_unlock (&pool->lock);

    return conn;
}

/* Return a connection to the pool; connections flagged "release" are
 * destroyed instead of being reused.  The curl handle is reset so stale
 * options don't leak into the next request. */
void
connection_pool_return_connection (ConnectionPool *pool, Connection *conn)
{
    if (!conn)
        return;

    if (conn->release) {
        connection_free (conn);
        return;
    }

    curl_easy_reset (conn->curl);

    pthread_mutex_lock (&pool->lock);
    g_queue_push_tail (pool->queue, conn);
    pthread_mutex_unlock (&pool->lock);
}

/* Map an HTTP status code to a short human-readable message. */
char*
http_code_to_str (int http_code)
{
    switch (http_code) {
    case HTTP_OK:
        return "Successful";
    case HTTP_BAD_REQUEST:
        return "Bad request";
    case HTTP_FORBIDDEN:
        return "Permission denied";
    case HTTP_NOT_FOUND:
        return "Resource not found";
    }

    if (http_code >= HTTP_INTERNAL_SERVER_ERROR)
        return "Internal server error";

    return "Unknown error";
}

/* Global libcurl initialization; call once before any transfer. */
void
http_tx_manager_init ()
{
    curl_global_init (CURL_GLOBAL_ALL);
}

/* Growable buffer that accumulates an HTTP response body. */
typedef struct _HttpResponse {
    char *content;
    size_t size;
} HttpResponse;

/* CURLOPT_WRITEFUNCTION callback: append the received chunk to the
 * HttpResponse buffer.  Returning a value != realsize signals an error
 * to libcurl.  (g_realloc aborts on OOM, so the NULL check below is
 * effectively belt-and-braces.) */
static size_t
recv_response (void *contents, size_t size, size_t nmemb, void *userp)
{
    size_t realsize = size * nmemb;
    HttpResponse *rsp = userp;

    rsp->content = g_realloc (rsp->content, rsp->size + realsize);
    if (!rsp->content) {
        seaf_warning ("Not enough memory.\n");
        /* return a value other than realsize to signify an error. */
        return 0;
    }

    memcpy (rsp->content + rsp->size, contents, realsize);
    rsp->size += realsize;

    return realsize;
}

#define HTTP_TIMEOUT_SEC 45

/*
 * The @timeout parameter is for detecting network connection problems.
 * The @timeout parameter should be set to TRUE for data-transfer-only operations,
 * such as getting objects, blocks. For operations that requires calculations
 * on the server side, the timeout should be set to FALSE. Otherwise when
 * the server sometimes takes more than 45 seconds to calculate the result,
 * the client will time out.
 */
static int
http_get_common (CURL *curl, const char *url,
                 struct curl_slist **headers,
                 const char *token,
                 int *rsp_status, char **rsp_content, gint64 *rsp_size,
                 HttpRecvCallback callback, void *cb_data,
                 gboolean timeout)
{
    int ret = 0;

    if (token) {
        char *token_header = g_strdup_printf ("Authorization: Token %s", token);
        *headers = curl_slist_append (*headers, token_header);
        g_free (token_header);
    }
    *headers = curl_slist_append (*headers, "User-Agent: Seafile Server");
    *headers = curl_slist_append (*headers, "Content-Type: application/json");
    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, *headers);

    curl_easy_setopt(curl, CURLOPT_URL, url);

    curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1L);

    if (timeout) {
        /* Set low speed limit to 1 bytes. This effectively means no data. */
        curl_easy_setopt(curl, CURLOPT_LOW_SPEED_LIMIT, 1);
        curl_easy_setopt(curl, CURLOPT_LOW_SPEED_TIME, HTTP_TIMEOUT_SEC);
    }

    /*if (seaf->disable_verify_certificate) {
        curl_easy_setopt (curl, CURLOPT_SSL_VERIFYPEER, 0L);
        curl_easy_setopt (curl, CURLOPT_SSL_VERIFYHOST, 0L);
    }*/

    HttpResponse rsp;
    memset (&rsp, 0, sizeof(rsp));
    if (rsp_content) {
        /* Either collect the whole body in memory... */
        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, recv_response);
        curl_easy_setopt(curl, CURLOPT_WRITEDATA, &rsp);
    } else if (callback) {
        /* ...or stream it to the caller's callback. */
        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, callback);
        curl_easy_setopt(curl, CURLOPT_WRITEDATA, cb_data);
    }

    /*gboolean is_https = (strncasecmp(url, "https", strlen("https")) == 0);
    set_proxy (curl, is_https);*/

    curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);

    int rc = curl_easy_perform (curl);
    if (rc != 0) {
        seaf_warning ("libcurl failed to GET %s: %s.\n",
                      url, curl_easy_strerror(rc));
        ret = -1;
        goto out;
    }

    long status;
    rc = curl_easy_getinfo (curl, CURLINFO_RESPONSE_CODE, &status);
    if (rc != CURLE_OK) {
        seaf_warning ("Failed to get status code for GET %s.\n", url);
        ret = -1;
        goto out;
    }

    *rsp_status = status;

    if (rsp_content) {
        /* Ownership of the buffer transfers to the caller. */
        *rsp_content = rsp.content;
        *rsp_size = rsp.size;
    }

out:
    if (ret < 0) {
        g_free (rsp.content);
    }

    return
ret;
}

/* Cursor over a request body being uploaded via CURLOPT_READFUNCTION. */
typedef struct _HttpRequest {
    const char *content;
    size_t size;
} HttpRequest;

/* CURLOPT_READFUNCTION callback: copy up to realsize bytes of the pending
 * request body into libcurl's buffer and advance the cursor.  Returning 0
 * signals end of data. */
static size_t
send_request (void *ptr, size_t size, size_t nmemb, void *userp)
{
    size_t realsize = size *nmemb;
    size_t copy_size;
    HttpRequest *req = userp;

    if (req->size == 0)
        return 0;

    copy_size = MIN(req->size, realsize);
    memcpy (ptr, req->content, copy_size);
    req->size -= copy_size;
    req->content = req->content + copy_size;

    return copy_size;
}

/*
 * Perform an HTTP POST of @req_content (@req_size bytes) to @url with the
 * given auth @token.  On success *rsp_status holds the HTTP status and,
 * when @rsp_content is non-NULL, the response body (caller frees).
 * @timeout/@timeout_sec configure libcurl's low-speed abort, as described
 * for http_get_common().  Returns 0 on success, -1 on transport failure.
 */
static int
http_post_common (CURL *curl, const char *url,
                  struct curl_slist **headers,
                  const char *token,
                  const char *req_content, gint64 req_size,
                  int *rsp_status, char **rsp_content, gint64 *rsp_size,
                  gboolean timeout, int timeout_sec)
{
    int ret = 0;

    if (token) {
        char *token_header = g_strdup_printf ("Authorization: Token %s", token);
        *headers = curl_slist_append (*headers, token_header);
        g_free (token_header);
    }
    *headers = curl_slist_append (*headers, "User-Agent: Seafile Server");
    *headers = curl_slist_append (*headers, "Content-Type: application/json");
    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, *headers);

    curl_easy_setopt(curl, CURLOPT_URL, url);

    curl_easy_setopt(curl, CURLOPT_POST, 1L);

    if (timeout) {
        /* Set low speed limit to 1 bytes. This effectively means no data. */
        curl_easy_setopt(curl, CURLOPT_LOW_SPEED_LIMIT, 1);
        curl_easy_setopt(curl, CURLOPT_LOW_SPEED_TIME, timeout_sec);
    }

    /*if (seaf->disable_verify_certificate) {
        curl_easy_setopt (curl, CURLOPT_SSL_VERIFYPEER, 0L);
        curl_easy_setopt (curl, CURLOPT_SSL_VERIFYHOST, 0L);
    }*/

    HttpRequest req;
    if (req_content) {
        memset (&req, 0, sizeof(req));
        req.content = req_content;
        req.size = req_size;
        curl_easy_setopt(curl, CURLOPT_READFUNCTION, send_request);
        curl_easy_setopt(curl, CURLOPT_READDATA, &req);
    }
    curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t)req_size);

    curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1L);

    HttpResponse rsp;
    memset (&rsp, 0, sizeof(rsp));
    if (rsp_content) {
        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, recv_response);
        curl_easy_setopt(curl, CURLOPT_WRITEDATA, &rsp);
    }

    /*gboolean is_https = (strncasecmp(url, "https", strlen("https")) == 0);
    set_proxy (curl, is_https);*/

    curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);

    /* All POST requests should remain POST after redirect. */
    curl_easy_setopt(curl, CURLOPT_POSTREDIR, CURL_REDIR_POST_ALL);

    int rc = curl_easy_perform (curl);
    if (rc != 0) {
        seaf_warning ("libcurl failed to POST %s: %s.\n",
                      url, curl_easy_strerror(rc));
        ret = -1;
        goto out;
    }

    long status;
    rc = curl_easy_getinfo (curl, CURLINFO_RESPONSE_CODE, &status);
    if (rc != CURLE_OK) {
        seaf_warning ("Failed to get status code for POST %s.\n", url);
        ret = -1;
        goto out;
    }

    *rsp_status = status;

    if (rsp_content) {
        *rsp_content = rsp.content;
        *rsp_size = rsp.size;
    }

out:
    if (ret < 0) {
        g_free (rsp.content);
    }

    return ret;
}

/*
 * Public POST helper using a pooled connection.  On transport failure the
 * connection is flagged for release so the pool drops it.
 */
int
http_post (Connection *conn, const char *url, const char *token,
           const char *req_content, gint64 req_size,
           int *rsp_status, char **rsp_content, gint64 *rsp_size,
           gboolean timeout, int timeout_sec)
{
    struct curl_slist *headers = NULL;
    int ret = 0;
    CURL *curl;

    curl = conn->curl;

    g_return_val_if_fail (req_content != NULL, -1);

    ret = http_post_common (curl, url, &headers, token, req_content, req_size,
                            rsp_status, rsp_content, rsp_size,
                            timeout, timeout_sec);
    if (ret < 0) {
        conn->release = TRUE;
    }

    curl_slist_free_all (headers);
    return ret;
}

/*
 * Extract the first "name" field from a Seahub user-list JSON response:
 * {"user_list": [{"name": ...}, ...]}.  Returns a newly allocated string
 * or NULL when parsing fails or no name is present.
 * NOTE(review): the loop index is "int i" against a size_t bound — fine
 * in practice for small lists, but a signed/unsigned mismatch.
 */
static char *
parse_nickname (const char *rsp_content, int rsp_size)
{
    json_t *array = NULL, *object, *member;
    json_error_t jerror;
    size_t n;
    int i;
    char *nickname = NULL;

    object = json_loadb (rsp_content, rsp_size, 0, &jerror);
    if (!object) {
        seaf_warning ("Parse response failed: %s.\n", jerror.text);
        return NULL;
    }

    array = json_object_get (object, "user_list");
    if (!array) {
        goto out;
    }

    n = json_array_size (array);
    for (i = 0; i < n; ++i) {
        json_t *obj = json_array_get (array, i);
        member = json_object_get (obj, "name");
        if (!member) {
            continue;
        }
        nickname = g_strdup (json_string_value(member));
        break;
    }

out:
    json_decref (object);
    return nickname;
}

/*
 * Create an internal-service JWT signed with the Seahub private key
 * (HS256, "is_internal": true, 5-minute expiry).  Returns NULL when no
 * key is configured or libjwt fails.
 */
static char *
gen_jwt_token ()
{
    char *jwt_token = NULL;
    gint64 now = (gint64)time(NULL);
    jwt_t *jwt = NULL;

    if (!seaf->seahub_pk) {
        return NULL;
    }

    int ret = jwt_new (&jwt);
    if (ret != 0 || jwt == NULL) {
        seaf_warning ("Failed to create jwt\n");
        goto out;
    }

    ret = jwt_add_grant_bool (jwt, "is_internal", TRUE);
    if (ret != 0) {
        seaf_warning ("Failed to add is_internal to jwt\n");
        goto out;
    }
    ret = jwt_add_grant_int (jwt, "exp", now + 300);
    if (ret != 0) {
        seaf_warning ("Failed to add expire time to jwt\n");
        goto out;
    }
    ret = jwt_set_alg (jwt, JWT_ALG_HS256,
                       (unsigned char *)seaf->seahub_pk,
                       strlen(seaf->seahub_pk));
    if (ret != 0) {
        seaf_warning ("Failed to set alg\n");
        goto out;
    }

    jwt_token = jwt_encode_str (jwt);

out:
    jwt_free (jwt);
    return jwt_token;
}

/*
 * Ask Seahub for the display nickname of @modifier via the internal
 * user-list endpoint.  Returns a newly allocated string or NULL.
 */
char *
http_tx_manager_get_nickname (const char *modifier)
{
    Connection *conn = NULL;
    struct curl_slist *headers = NULL;
    int ret = 0;
    CURL *curl;
    json_t *content = NULL;
    json_t *array = NULL;
    int rsp_status;
    char *req_content = NULL;
    char *jwt_token = NULL;
    char *rsp_content = NULL;
    char *nickname = NULL;
    gint64 rsp_size;
    char *url = NULL;

    jwt_token = gen_jwt_token ();
    if (!jwt_token) {
        return NULL;
    }

    conn = connection_pool_get_connection (seaf->seahub_conn_pool);
    if (!conn) {
        g_free (jwt_token);
        seaf_warning ("Failed to get connection: out of memory.\n");
        return NULL;
    }

    content = json_object ();
    array = json_array ();
    json_array_append_new (array, json_string (modifier));
    json_object_set_new (content, "user_id_list", array);
    req_content = json_dumps (content, JSON_COMPACT);
    if (!req_content) {
        json_decref (content);
        seaf_warning ("Failed to dump json request.\n");
        goto out;
    }
    json_decref (content);

    curl = conn->curl;

    url = g_strdup_printf("%s/user-list/", seaf->seahub_url);

    ret = http_post_common (curl, url, &headers, jwt_token,
                            req_content, strlen(req_content),
                            &rsp_status, &rsp_content, &rsp_size, TRUE, 45);
    if (ret < 0) {
        conn->release = TRUE;
        goto out;
    }

    if (rsp_status != HTTP_OK) {
        goto out;
    }

    nickname = parse_nickname (rsp_content, rsp_size);

out:
    g_free (url);
    g_free (jwt_token);
    g_free (req_content);
    g_free (rsp_content);
    curl_slist_free_all (headers);
    connection_pool_return_connection (seaf->seahub_conn_pool, conn);

    return nickname;
}

/*
 * Parse Seahub's share-link info response into a SeafileShareLinkInfo
 * GObject.  "repo_id" is mandatory; "file_path", "parent_dir" and
 * "share_type" are optional.  Returns NULL on parse failure.
 */
static SeafileShareLinkInfo *
parse_share_link_info (const char *rsp_content, int rsp_size)
{
    json_t *object;
    json_error_t jerror;
    size_t n;
    int i;
    const char *repo_id = NULL;
    const char *file_path = NULL;
    const char *parent_dir = NULL;
    const char *share_type = NULL;
    SeafileShareLinkInfo *info = NULL;

    object = json_loadb (rsp_content, rsp_size, 0, &jerror);
    if (!object) {
        seaf_warning ("Parse response failed: %s.\n", jerror.text);
        return NULL;
    }

    repo_id = json_object_get_string_member (object, "repo_id");
    if (!repo_id) {
        seaf_warning ("Failed to find repo_id in json.\n");
        goto out;
    }

    file_path = json_object_get_string_member (object, "file_path");
    parent_dir = json_object_get_string_member (object, "parent_dir");
    share_type = json_object_get_string_member (object, "share_type");

    info = g_object_new (SEAFILE_TYPE_SHARE_LINK_INFO,
                         "repo_id", repo_id,
                         "file_path", file_path,
                         "parent_dir", parent_dir,
                         "share_type", share_type,
                         NULL);

out:
    json_decref (object);
    return info;
}

/*
 * Extract "error_msg" from an error response body; falls back to a copy
 * of the raw body when it is not JSON or lacks the field.  Returns NULL
 * only when @rsp_content is NULL.
 */
char *
parse_error_message (const char *rsp_content, int rsp_size)
{
    json_t *object;
    json_error_t jerror;
    const char *err_msg = NULL;
    char *ret = NULL;

    if (!rsp_content) {
        return NULL;
    }

    object = json_loadb (rsp_content, rsp_size, 0, &jerror);
    if (!object) {
        ret = g_strdup (rsp_content);
        return ret;
    }

    err_msg = json_object_get_string_member (object, "error_msg");
    if (!err_msg) {
        ret = g_strdup (rsp_content);
        goto out;
    }

    ret = g_strdup (err_msg);

out:
    json_decref (object);
    return ret;
}

/*
 * Ask Seahub whether a share-link @token (plus optional client cookie,
 * ip and user agent) grants access.  On non-OK responses *err_msg is
 * filled from the response body.  Returns the parsed link info or NULL.
 */
SeafileShareLinkInfo *
http_tx_manager_query_share_link_info (const char *token, const char *cookie,
                                       const char *type, const char *ip_addr,
                                       const char *user_agent, int *status,
                                       char **err_msg)
{
    Connection *conn = NULL;
    char *cookie_header;
    struct curl_slist *headers = NULL;
    int ret = 0;
    CURL *curl;
    json_t *content = NULL;
    char *req_content = NULL;
    int rsp_status;
    char *jwt_token = NULL;
    char *rsp_content = NULL;
    gint64 rsp_size;
    SeafileShareLinkInfo *info = NULL;
    char *url = NULL;

    jwt_token = gen_jwt_token ();
    if (!jwt_token) {
        return NULL;
    }

    conn = connection_pool_get_connection (seaf->seahub_conn_pool);
    if (!conn) {
        g_free (jwt_token);
        seaf_warning ("Failed to get connection: out of memory.\n");
        return NULL;
    }

    content = json_object ();
    json_object_set_new (content, "token", json_string(token));
    if (ip_addr)
        json_object_set_new (content, "ip_addr", json_string(ip_addr));
    if (user_agent)
        json_object_set_new (content, "user_agent", json_string(user_agent));
    req_content = json_dumps (content, JSON_COMPACT);
    if (!req_content) {
        seaf_warning ("Failed to dump json request.\n");
        goto out;
    }

    curl = conn->curl;

    if (cookie) {
        cookie_header = g_strdup_printf ("Cookie: %s", cookie);
        headers = curl_slist_append (headers, cookie_header);
        g_free (cookie_header);
    }

    url = g_strdup_printf("%s/check-share-link-access/?type=%s",
                          seaf->seahub_url, type);

    ret = http_post_common (curl, url, &headers, jwt_token,
                            req_content, strlen(req_content),
                            &rsp_status, &rsp_content, &rsp_size, TRUE, 45);
    if (ret < 0) {
        conn->release = TRUE;
        goto out;
    }

    *status = rsp_status;
    if (rsp_status != HTTP_OK) {
        *err_msg = parse_error_message (rsp_content, rsp_size);
        goto out;
    }

    info = parse_share_link_info (rsp_content, rsp_size);

out:
    if (content)
        json_decref (content);
    g_free (url);
    g_free (jwt_token);
    g_free (req_content);
    g_free (rsp_content);
    curl_slist_free_all (headers);
    connection_pool_return_connection (seaf->seahub_conn_pool, conn);

    return info;
}

/*
 * Extract the "user" field from Seahub's file-access check response.
 * Returns a newly allocated string or NULL.
 */
char *
parse_file_access_info (const char *rsp_content, int rsp_size)
{
    json_t *object;
    json_error_t jerror;
    const char *user = NULL;
    char *ret = NULL;

    object = json_loadb (rsp_content, rsp_size, 0, &jerror);
    if (!object) {
        seaf_warning ("Failed to parse response when check file access in Seahub: %s.\n",
                      jerror.text);
        return NULL;
    }

    user = json_object_get_string_member (object, "user");
    if (!user) {
        seaf_warning ("Failed to find user in json when check file access in Seahub.\n");
        goto out;
    }

    ret = g_strdup (user);

out:
    json_decref (object);
    return ret;
}

int
http_tx_manager_check_file_access (const char *repo_id, const char *token, const char *cookie, const char *path, const char *op, const char *ip_addr, const char *user_agent, char **user, int *status, char **err_msg) { Connection *conn = NULL; char *cookie_header; struct curl_slist *headers = NULL; int ret = -1; CURL *curl; json_t *content = NULL; int rsp_status; char *req_content = NULL; char *jwt_token = NULL; char *rsp_content = NULL; gint64 rsp_size; char *url = NULL; jwt_token = gen_jwt_token (); if (!jwt_token) { return -1; } conn = connection_pool_get_connection (seaf->seahub_conn_pool); if (!conn) { g_free (jwt_token); seaf_warning ("Failed to get connection: out of memory.\n"); return -1; } content = json_object (); json_object_set_new (content, "op", json_string(op)); if (token) { json_object_set_new (content, "token", json_string(token)); } json_object_set_new (content, "path", json_string(path)); if (ip_addr) json_object_set_new (content, "ip_addr", json_string(ip_addr)); if (user_agent) json_object_set_new (content, "user_agent", json_string(user_agent)); req_content = json_dumps (content, JSON_COMPACT); if (!req_content) { ret = -1; seaf_warning ("Failed to dump json request.\n"); goto out; } curl = conn->curl; if (cookie) { cookie_header = g_strdup_printf ("Cookie: %s", cookie); headers = curl_slist_append (headers, cookie_header); g_free (cookie_header); } url = g_strdup_printf("%s/repos/%s/check-access/", seaf->seahub_url, repo_id); ret = http_post_common (curl, url, &headers, jwt_token, req_content, strlen(req_content), &rsp_status, &rsp_content, &rsp_size, TRUE, 45); if (ret < 0) { conn->release = TRUE; goto out; } *status = rsp_status; if (rsp_status != HTTP_OK) { *err_msg = parse_error_message (rsp_content, rsp_size); ret = -1; goto out; } *user = parse_file_access_info (rsp_content, rsp_size); if (*user == NULL) { ret = -1; goto out; } out: if (content) json_decref (content); g_free (url); g_free (jwt_token); g_free (req_content); g_free 
(rsp_content); curl_slist_free_all (headers); connection_pool_return_connection (seaf->seahub_conn_pool, conn); return ret; } ================================================ FILE: server/http-tx-mgr.h ================================================ #ifndef HTTP_TX_MGR_H #define HTTP_TX_MGR_H #include #define HTTP_OK 200 #define HTTP_BAD_REQUEST 400 #define HTTP_FORBIDDEN 403 #define HTTP_NOT_FOUND 404 #define HTTP_NO_QUOTA 443 #define HTTP_REPO_DELETED 444 #define HTTP_INTERNAL_SERVER_ERROR 500 typedef struct _Connection Connection; typedef struct _ConnectionPool ConnectionPool; ConnectionPool * connection_pool_new (); Connection * connection_pool_get_connection (ConnectionPool *pool); void connection_pool_return_connection (ConnectionPool *pool, Connection *conn); void connection_pool_free (ConnectionPool *pool); char* http_code_to_str (int http_code); typedef size_t (*HttpRecvCallback) (void *, size_t, size_t, void *); int http_get (Connection *conn, const char *url, const char *token, int *rsp_status, char **rsp_content, gint64 *rsp_size, HttpRecvCallback callback, void *cb_data, gboolean timeout); int http_post (Connection *conn, const char *url, const char *token, const char *req_content, gint64 req_size, int *rsp_status, char **rsp_content, gint64 *rsp_size, gboolean timeout, int timeout_sec); void http_tx_manager_init (); char * http_tx_manager_get_nickname (const char *modifier); SeafileShareLinkInfo * http_tx_manager_query_share_link_info (const char *token, const char *cookie, const char *type, const char *ip_addr, const char *user_agent, int *status, char **err_msg); int http_tx_manager_check_file_access (const char *repo_id, const char *token, const char *cookie, const char *path, const char *op, const char *ip_addr, const char *user_agent, char **user, int *status, char **err_msg); #endif ================================================ FILE: server/index-blocks-mgr.c ================================================ /* -*- Mode: C; tab-width: 4; 
indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include "common.h" #include #include #include #include #include "utils.h" #include "log.h" #include "seafile-session.h" #include "repo-mgr.h" #include "fs-mgr.h" #include "seafile-error.h" #include "seafile-crypt.h" #include "index-blocks-mgr.h" #define TOKEN_LEN 36 #define PROGRESS_TTL 5 * 3600 // 5 hours #define SCAN_PROGRESS_INTERVAL 24 * 3600 // 1 day static void start_index_task (gpointer data, gpointer user_data); static char * gen_new_token (GHashTable *token_hash); static int scan_progress (void *data); struct SeafileCrypt; typedef struct IndexBlksMgrPriv { pthread_mutex_t progress_lock; GHashTable *progress_store; GThreadPool *idx_tpool; // This timer is used to scan progress and remove invalid progress. CcnetTimer *scan_progress_timer; } IndexBlksMgrPriv; typedef struct IndexPara { GList *filenames; GList *paths; SeafRepo *repo; char *user; char *canon_path; int replace_existed; SeafileCrypt *crypt; gboolean ret_json; IdxProgress *progress; } IndexPara; static void free_progress (IdxProgress *progress) { if (!progress) return; g_free (progress->ret_json); g_free (progress); } IndexBlksMgr * index_blocks_mgr_new (SeafileSession *session) { GError *error = NULL; IndexBlksMgr *mgr = g_new0 (IndexBlksMgr, 1); IndexBlksMgrPriv *priv = g_new0 (IndexBlksMgrPriv, 1); priv->idx_tpool = g_thread_pool_new (start_index_task, priv, session->max_index_processing_threads, FALSE, &error); if (!priv->idx_tpool) { if (error) { seaf_warning ("Failed to create index task thread pool: %s.\n", error->message); g_clear_error (&error); } else { seaf_warning ("Failed to create index task thread pool.\n"); } g_free (priv); g_free (mgr); return NULL; } pthread_mutex_init (&priv->progress_lock, NULL); priv->progress_store = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, (GDestroyNotify)free_progress); priv->scan_progress_timer = ccnet_timer_new (scan_progress, priv, SCAN_PROGRESS_INTERVAL * 1000); mgr->priv = priv; return 
mgr; } static int scan_progress (void *data) { time_t now = time(NULL); IndexBlksMgrPriv *priv = data; GHashTableIter iter; gpointer key, value; IdxProgress *progress; pthread_mutex_lock (&priv->progress_lock); g_hash_table_iter_init (&iter, priv->progress_store); while (g_hash_table_iter_next (&iter, &key, &value)) { progress = value; if (now >= progress->expire_ts && progress->status != 1) { g_hash_table_iter_remove (&iter); } } pthread_mutex_unlock (&priv->progress_lock); return TRUE; } static void free_index_para (IndexPara *idx_para) { if (!idx_para) return; string_list_free (idx_para->filenames); string_list_free (idx_para->paths); seaf_repo_unref (idx_para->repo); g_free (idx_para->user); g_free (idx_para->canon_path); g_free (idx_para->crypt); g_free (idx_para); } static void start_index_task (gpointer data, gpointer user_data) { IndexPara *idx_para = data; SeafRepo *repo = idx_para->repo; GList *ptr = NULL, *id_list = NULL, *size_list = NULL; char *path = NULL; char *ret_json = NULL; char *gc_id = NULL; char hex[41]; unsigned char sha1[20]; int ret = 0; IdxProgress *progress = idx_para->progress; SeafileCrypt *crypt = idx_para->crypt; gc_id = seaf_repo_get_current_gc_id(repo); gint64 *size; for (ptr = idx_para->paths; ptr; ptr = ptr->next) { path = ptr->data; size = g_new (gint64, 1); if (seaf_fs_manager_index_blocks (seaf->fs_mgr, repo->store_id, repo->version, path, sha1, size, crypt, TRUE, FALSE, &(progress->indexed)) < 0) { seaf_warning ("failed to index blocks"); progress->status = -1; goto out; } rawdata_to_hex(sha1, hex, 20); id_list = g_list_prepend (id_list, g_strdup(hex)); size_list = g_list_prepend (size_list, size); } id_list = g_list_reverse (id_list); size_list = g_list_reverse (size_list); ret = post_files_and_gen_commit (idx_para->filenames, idx_para->repo->id, idx_para->user, idx_para->ret_json ? 
&ret_json : NULL, idx_para->replace_existed, idx_para->canon_path, id_list, size_list, 0, gc_id, NULL); progress->status = ret; if (idx_para->ret_json) { progress->ret_json = g_strdup(ret_json); g_free (ret_json); } out: /* remove temp files */ for (ptr = idx_para->paths; ptr; ptr = ptr->next) g_unlink (ptr->data); g_list_free_full (id_list, g_free); g_list_free_full (size_list, g_free); free_index_para (idx_para); g_free (gc_id); return; } char * index_blocks_mgr_query_progress (IndexBlksMgr *mgr, const char *token, GError **error) { char *ret_info; json_t *obj; IdxProgress *progress; IndexBlksMgrPriv *priv = mgr->priv; pthread_mutex_lock (&priv->progress_lock); progress = g_hash_table_lookup (priv->progress_store, token); pthread_mutex_unlock (&priv->progress_lock); if (!progress) { seaf_warning ("Index progress not found for token %s\n", token); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Index progress not found"); return NULL; } obj = json_object (); json_object_set_int_member (obj, "indexed", progress->indexed); json_object_set_int_member (obj, "total", progress->total); json_object_set_int_member (obj, "status", progress->status); json_object_set_string_member (obj, "ret_json", progress->ret_json); ret_info = json_dumps (obj, JSON_COMPACT); json_decref (obj); /* index finished */ if (progress->status != 1) { pthread_mutex_lock (&priv->progress_lock); g_hash_table_remove (priv->progress_store, token); pthread_mutex_unlock (&priv->progress_lock); } return ret_info; } int index_blocks_mgr_start_index (IndexBlksMgr *mgr, GList *filenames, GList *paths, const char *repo_id, const char *user, int replace_existed, gboolean ret_json, const char *canon_path, SeafileCrypt *crypt, char **task_id) { GList *ptr = NULL; char *path = NULL, *token = NULL; SeafileCrypt *_crypt = NULL; SeafRepo *repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { seaf_warning ("Failed to get repo %.8s.\n", repo_id); return -1; } IndexBlksMgrPriv *priv = 
mgr->priv; token = gen_new_token(priv->progress_store); if (!token) { seaf_warning ("Failed to genarate index token for repo %.8s.\n", repo_id); seaf_repo_unref (repo); return -1; } if (crypt) { _crypt = g_new0(SeafileCrypt, 1); memcpy (_crypt, crypt, sizeof (SeafileCrypt)); } *task_id = g_strdup (token); IdxProgress *progress = g_new0(IdxProgress, 1); progress->status = 1; IndexPara *idx_para = g_new0 (IndexPara, 1); idx_para->filenames = g_list_copy_deep (filenames, (GCopyFunc)g_strdup, NULL); idx_para->paths = g_list_copy_deep (paths, (GCopyFunc)g_strdup, NULL); idx_para->repo = repo; idx_para->user = g_strdup (user); idx_para->canon_path = g_strdup(canon_path); idx_para->replace_existed = replace_existed; idx_para->ret_json = ret_json; idx_para->crypt = _crypt; idx_para->progress = progress; progress->status = 1; progress->expire_ts = time(NULL) + PROGRESS_TTL; /* Get total size of all files for progress. */ for (ptr = paths; ptr; ptr = ptr->next) { SeafStat sb; path = ptr->data; if (seaf_stat (path, &sb) < 0) { seaf_warning ("Bad file %s: %s.\n", path, strerror(errno)); goto error; } if (!S_ISREG(sb.st_mode)) goto error; progress->total += (gint64)sb.st_size; } pthread_mutex_lock (&priv->progress_lock); g_hash_table_replace (priv->progress_store, g_strdup (token), progress); pthread_mutex_unlock (&priv->progress_lock); g_thread_pool_push (priv->idx_tpool, idx_para, NULL); g_free (token); return 0; error: g_free (token); /* remove temp files */ for (ptr = idx_para->paths; ptr; ptr = ptr->next) g_unlink (ptr->data); free_index_para (idx_para); g_free (progress); return -1; } static char * gen_new_token (GHashTable *token_hash) { char uuid[37]; char *token; while (1) { gen_uuid_inplace (uuid); token = g_strndup(uuid, TOKEN_LEN); /* Make sure the new token doesn't conflict with an existing one. 
*/ if (g_hash_table_lookup (token_hash, token) != NULL) g_free (token); else return token; } } ================================================ FILE: server/index-blocks-mgr.h ================================================ #ifndef INDEX_BLOCKS_MGR_H #define INDEX_BLOCKS_MGR_H #include "seafile-object.h" struct IndexBlksMgrPriv; struct _SeafileSession; typedef struct IndexBlksMgr { struct IndexBlksMgrPriv *priv; } IndexBlksMgr; typedef struct IdxProgress { gint64 indexed; gint64 total; int status; /* 0: finished, -1: error, 1: indexing */ char *ret_json; gint64 expire_ts; } IdxProgress; IndexBlksMgr * index_blocks_mgr_new (struct _SeafileSession *session); char * index_blocks_mgr_query_progress (IndexBlksMgr *mgr, const char *token, GError **error); int index_blocks_mgr_start_index (IndexBlksMgr *mgr, GList *filenames, GList *paths, const char *repo_id, const char *user, int replace_existed, gboolean ret_json, const char *canon_path, SeafileCrypt *crypt, char **task_id); #endif ================================================ FILE: server/metric-mgr.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include "common.h" #include "utils.h" #include "log.h" #include #include #include "seafile-session.h" #include "metric-mgr.h" #include "obj-cache.h" #define PUBLISH_INTERVAL 30 /* 30 seconds*/ #define REDIS_CHANNEL "metric_channel" #define COMPONENT_NAME "fileserver" struct _SeafMetricManagerPriv { int in_flight_request_count; struct ObjCache *cache; }; SeafMetricManager* seaf_metric_manager_new (struct _SeafileSession *seaf) { SeafMetricManager *mgr = g_new0 (SeafMetricManager, 1); mgr->priv = g_new0 (SeafMetricManagerPriv, 1); mgr->seaf = seaf; // redis cache mgr->priv->cache = seaf->obj_cache; return mgr; } static void * publish_metrics (void *data); int seaf_metric_manager_start (SeafMetricManager *mgr) { pthread_t tid; int rc; rc = pthread_create (&tid, NULL, publish_metrics, mgr); if 
(rc != 0) { seaf_warning ("Failed to create publish metrics worker thread: %s.\n", strerror(rc)); return -1; } return 0; } void seaf_metric_manager_in_flight_request_inc (SeafMetricManager *mgr) { SeafMetricManagerPriv *priv = mgr->priv; g_atomic_int_inc (&priv->in_flight_request_count); } void seaf_metric_manager_in_flight_request_dec (SeafMetricManager *mgr) { SeafMetricManagerPriv *priv = mgr->priv; g_atomic_int_dec_and_test (&priv->in_flight_request_count); } static int publish_redis_msg (SeafMetricManager *mgr, const char *msg) { SeafMetricManagerPriv *priv = mgr->priv; if (!priv->cache) { return 0; } int ret = objcache_publish (priv->cache, REDIS_CHANNEL, msg); return ret; } static int publish_in_flight_request (SeafMetricManager *mgr) { int ret = 0; json_t *obj = NULL; char *msg = NULL; SeafMetricManagerPriv *priv = mgr->priv; obj = json_object (); json_object_set_new (obj, "metric_name", json_string("in_flight_request_total")); json_object_set_new (obj, "metric_value", json_integer (priv->in_flight_request_count)); json_object_set_new (obj, "metric_type", json_string("gauge")); json_object_set_new (obj, "component_name", json_string(COMPONENT_NAME)); json_object_set_new (obj, "metric_help", json_string("The number of currently running http requests.")); json_object_set_new (obj, "node_name", json_string(seaf->node_name)); msg = json_dumps (obj, JSON_COMPACT); ret = publish_redis_msg (mgr, msg); json_decref (obj); g_free (msg); return ret; } static void do_publish_metrics (SeafMetricManager *mgr) { int rc; // Don't publish metrics when use go fileserver. 
if (seaf->go_fileserver) { return; } rc = publish_in_flight_request (mgr); if (rc < 0) { seaf_warning ("Failed to publish in flight request\n"); return; } } static void * publish_metrics (void *data) { SeafMetricManager *mgr = data; while (1) { do_publish_metrics (mgr); sleep(PUBLISH_INTERVAL); } return NULL; } ================================================ FILE: server/metric-mgr.h ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef SEAF_METRIC_MGR_H #define SEAF_METRIC_MGR_H struct _SeafMetricManager; typedef struct _SeafMetricManager SeafMetricManager; typedef struct _SeafMetricManagerPriv SeafMetricManagerPriv; struct _SeafMetricManager { struct _SeafileSession *seaf; SeafMetricManagerPriv *priv; }; SeafMetricManager* seaf_metric_manager_new (struct _SeafileSession *seaf); int seaf_metric_manager_start (SeafMetricManager *mgr); void seaf_metric_manager_in_flight_request_inc (SeafMetricManager *mgr); void seaf_metric_manager_in_flight_request_dec (SeafMetricManager *mgr); #endif ================================================ FILE: server/notif-mgr.c ================================================ #include "common.h" #include #include #include #include #include "seafile-session.h" #include "http-tx-mgr.h" #include "notif-mgr.h" #include "utils.h" #include "seafile-error.h" #include "log.h" #define NOTIF_TIMEOUT_SEC 1 #define JWT_TOKEN_EXPIRE_TIME 300 /* 5 minutes */ struct _NotifPriv { char *notif_url; ConnectionPool *connection_pool; }; typedef struct _NotifPriv NotifPriv; typedef struct Event { NotifPriv *priv; char *msg; } Event; NotifManager * seaf_notif_manager_new (struct _SeafileSession *seaf, char *url) { NotifManager *mgr = g_new0 (NotifManager, 1); mgr->seaf = seaf; NotifPriv *priv = g_new0 (NotifPriv, 1); priv->connection_pool = connection_pool_new (); if (!priv->connection_pool) { g_free (priv); g_free (mgr); return NULL; } priv->notif_url = url; mgr->priv = priv; 
return mgr; } static char * gen_jwt_token () { char *jwt_token = NULL; gint64 now = (gint64)time(NULL); jwt_t *jwt = NULL; if (!seaf->notif_server_private_key) { seaf_warning ("No private key is configured for generating jwt token\n"); return NULL; } int ret = jwt_new (&jwt); if (ret != 0 || jwt == NULL) { seaf_warning ("Failed to create jwt\n"); goto out; } ret = jwt_add_grant_int (jwt, "exp", now + JWT_TOKEN_EXPIRE_TIME); if (ret != 0) { seaf_warning ("Failed to expire time to jwt\n"); goto out; } ret = jwt_set_alg (jwt, JWT_ALG_HS256, (unsigned char *)seaf->notif_server_private_key, strlen(seaf->notif_server_private_key)); if (ret != 0) { seaf_warning ("Failed to set alg\n"); goto out; } jwt_token = jwt_encode_str (jwt); out: jwt_free (jwt); return jwt_token; } static void* send_event (void *data) { Event *event= data; NotifPriv *priv = event->priv; Connection *conn = NULL; int rsp_status; char *req_url = NULL; char *jwt_token = NULL; jwt_token = gen_jwt_token (); if (!jwt_token) { return event; } conn = connection_pool_get_connection (priv->connection_pool); if (!conn) { g_free (jwt_token); seaf_warning ("Failed to get connection: out of memory.\n"); return event; } req_url = g_strdup_printf ("%s/events", priv->notif_url); int ret; ret = http_post (conn, req_url, jwt_token, event->msg, strlen (event->msg), &rsp_status, NULL, NULL, TRUE, NOTIF_TIMEOUT_SEC); if (ret < 0) { goto out; } if (rsp_status != HTTP_OK) { seaf_warning ("Failed to send event to notification server %s: %d.\n", priv->notif_url, rsp_status); } out: g_free (jwt_token); g_free (req_url); connection_pool_return_connection (priv->connection_pool, conn); return event; } static void free_send_event(void *data) { if (!data) return; Event *event= data; if (event->msg) g_free (event->msg); g_free (event); } void seaf_notif_manager_send_event (NotifManager *mgr, const char *msg) { Event *event = g_new0 (Event, 1); event->priv = mgr->priv; event->msg = g_strdup (msg); ccnet_job_manager_schedule_job 
(seaf->job_mgr, send_event, free_send_event, event); } ================================================ FILE: server/notif-mgr.h ================================================ #ifndef HTTP_NOTIFICATION_MGR_H #define HTTP_NOTIFICATION_MGR_H struct _NotifManager { struct _SeafileSession *seaf; struct _NotifPriv *priv; }; typedef struct _NotifManager NotifManager; NotifManager * seaf_notif_manager_new (struct _SeafileSession *seaf, char *url); void seaf_notif_manager_send_event (NotifManager *mgr, const char *msg); #endif ================================================ FILE: server/pack-dir.c ================================================ #include "common.h" #ifdef HAVE_EVHTP #define DEBUG_FLAG SEAFILE_DEBUG_HTTP #include "log.h" #include "seafile-object.h" #include "seafile-crypt.h" #include "seafile-error.h" #include "utils.h" #include "seafile-session.h" #include "pack-dir.h" #include "seaf-utils.h" #include #include #include #ifdef WIN32 #define S_IFLNK 0120000 /* Symbolic link */ #define S_ISLNK(x) (((x) & S_IFMT) == S_IFLNK) #endif typedef struct { struct archive *a; SeafileCrypt *crypt; const char *top_dir_name; gboolean is_windows; time_t mtime; char store_id[37]; int repo_version; int tmp_fd; char *tmp_zip_file; } PackDirData; static char * do_iconv (char *fromcode, char *tocode, char *in) { iconv_t conv; size_t inlen, outlen, len; char out[1024]; char *pin = in; char *pout = out; conv = iconv_open (tocode, fromcode); if (conv == (iconv_t)-1) { return NULL; } inlen = strlen (in); outlen = sizeof(out); len = iconv (conv, &pin, &inlen, &pout, &outlen); iconv_close (conv); if (len == -1) { return NULL; } outlen = sizeof(out) - outlen; return g_strndup(out, outlen); } static int add_file_to_archive (PackDirData *data, const char *parent_dir, const char *base_name, SeafDirent *dent) { struct archive *a = data->a; struct SeafileCrypt *crypt = data->crypt; gboolean is_windows = data->is_windows; const char *top_dir_name = data->top_dir_name; struct 
archive_entry *entry = NULL; Seafile *file = NULL; char *pathname = NULL; char buf[64 * 1024]; int len = 0; int n = 0; int idx = 0; BlockHandle *handle = NULL; BlockMetadata *bmd = NULL; char *blk_id = NULL; uint32_t remain = 0; EVP_CIPHER_CTX *ctx; gboolean enc_init = FALSE; char *dec_out = NULL; int dec_out_len = -1; int ret = 0; pathname = g_build_filename (top_dir_name, parent_dir, base_name, NULL); file = seaf_fs_manager_get_seafile (seaf->fs_mgr, data->store_id, data->repo_version, dent->id); if (!file) { ret = -1; goto out; } entry = archive_entry_new (); /* File name fixup for WinRAR */ if (is_windows && seaf->http_server->windows_encoding) { char *win_file_name = do_iconv ("UTF-8", seaf->http_server->windows_encoding, pathname); if (!win_file_name) { seaf_warning ("Failed to convert file name to %s\n", seaf->http_server->windows_encoding); ret = -1; goto out; } archive_entry_copy_pathname (entry, win_file_name); g_free (win_file_name); } else { archive_entry_set_pathname (entry, pathname); } /* FIXME: 0644 should be set when upload files in repo-mgr.c */ archive_entry_set_mode (entry, dent->mode | 0644); archive_entry_set_size (entry, file->file_size); archive_entry_set_mtime (entry, data->mtime, 0); n = archive_write_header (a, entry); if (n != ARCHIVE_OK) { seaf_warning ("archive_write_header error: %s\n", archive_error_string(a)); ret = -1; goto out; } /* Read data of this entry block by block */ while (idx < file->n_blocks) { blk_id = file->blk_sha1s[idx]; handle = seaf_block_manager_open_block (seaf->block_mgr, data->store_id, data->repo_version, blk_id, BLOCK_READ); if (!handle) { seaf_warning ("Failed to open block %s:%s\n", data->store_id, blk_id); ret = -1; goto out; } bmd = seaf_block_manager_stat_block_by_handle (seaf->block_mgr, handle); if (!bmd) { seaf_warning ("Failed to stat block %s:%s\n", data->store_id, blk_id); ret = -1; goto out; } remain = bmd->size; g_free (bmd); if (crypt) { if (seafile_decrypt_init (&ctx, crypt->version, 
crypt->key, crypt->iv) < 0) { seaf_warning ("Failed to init decrypt.\n"); ret = -1; goto out; } enc_init = TRUE; } while (remain != 0) { n = seaf_block_manager_read_block (seaf->block_mgr, handle, buf, sizeof(buf)); if (n <= 0) { seaf_warning ("failed to read block %s\n", blk_id); ret = -1; goto out; } remain -= n; /* OK, We're read some data of this block */ if (crypt == NULL) { /* not encrypted */ len = archive_write_data (a, buf, n); if (len <= 0) { seaf_warning ("archive_write_data error: %s\n", archive_error_string(a)); ret = -1; goto out; } } else { /* an encrypted block */ dec_out = g_new (char, n + 16); if (!dec_out) { seaf_warning ("Failed to alloc memory.\n"); ret = -1; goto out; } int r = EVP_DecryptUpdate (ctx, (unsigned char *)dec_out, &dec_out_len, (unsigned char *)buf, n); /* EVP_DecryptUpdate returns 1 on success, 0 on failure */ if (r != 1) { seaf_warning ("Decrypt block %s failed.\n", blk_id); ret = -1; goto out; } if (dec_out_len > 0) { len = archive_write_data (a, dec_out, dec_out_len); if (len <= 0) { seaf_warning ("archive_write_data error: %s\n", archive_error_string(a)); ret = -1; goto out; } } /* If it's the last piece of a block, call decrypt_final() * to decrypt the possible partial block. 
*/ if (remain == 0) { r = EVP_DecryptFinal_ex (ctx, (unsigned char *)dec_out, &dec_out_len); if (r != 1) { seaf_warning ("Decrypt block %s failed.\n", blk_id); ret = -1; goto out; } if (dec_out_len != 0) { len = archive_write_data (a, dec_out, dec_out_len); if (len <= 0) { seaf_warning ("archive_write_data error: %s\n", archive_error_string(a)); ret = -1; goto out; } } } g_free (dec_out); dec_out = NULL; } } seaf_block_manager_close_block (seaf->block_mgr, handle); seaf_block_manager_block_handle_free (seaf->block_mgr, handle); handle = NULL; /* turn to next block */ idx++; } out: g_free (pathname); if (entry) archive_entry_free (entry); if (file) seafile_unref (file); if (handle) { seaf_block_manager_close_block (seaf->block_mgr, handle); seaf_block_manager_block_handle_free(seaf->block_mgr, handle); } if (crypt != NULL && enc_init) EVP_CIPHER_CTX_free (ctx); g_free (dec_out); return ret; } static int archive_dir (PackDirData *data, const char *root_id, const char *dirpath, Progress *progress) { SeafDir *dir = NULL; SeafDirent *dent; GList *ptr; char *subpath = NULL; int ret = 0; dir = seaf_fs_manager_get_seafdir (seaf->fs_mgr, data->store_id, data->repo_version, root_id); if (!dir) { seaf_warning ("failed to get dir %s:%s\n", data->store_id, root_id); goto out; } if (!dir->entries) { char *pathname = g_build_filename (data->top_dir_name, dirpath, NULL); struct archive_entry *entry = archive_entry_new (); gboolean is_windows = data->is_windows; if (is_windows && seaf->http_server->windows_encoding) { char *win_file_name = do_iconv ("UTF-8", seaf->http_server->windows_encoding, pathname); if (!win_file_name) { seaf_warning ("Failed to convert file name to %s\n", seaf->http_server->windows_encoding); ret = -1; goto out; } archive_entry_copy_pathname (entry, win_file_name); g_free (win_file_name); } else { archive_entry_set_pathname (entry, pathname); } archive_entry_set_filetype (entry, AE_IFDIR); archive_entry_set_mtime (entry, data->mtime, 0); 
archive_entry_set_perm (entry, 0755);
        int n = archive_write_header (data->a, entry);
        if (n != ARCHIVE_OK) {
            seaf_warning ("archive_write_header error: %s\n",
                          archive_error_string(data->a));
            ret = -1;
        }
        archive_entry_free (entry);
        g_free (pathname);
        goto out;
    }

    /* Non-empty dir: archive each entry; recurse into subdirectories. */
    for (ptr = dir->entries; ptr; ptr = ptr->next) {
        /* Bail out promptly if the zip task has been canceled. */
        if (progress->canceled) {
            ret = -1;
            goto out;
        }
        dent = ptr->data;
        if (S_ISREG(dent->mode)) {
            ret = add_file_to_archive (data, dirpath, dent->name, dent);
            if (ret == 0) {
                g_atomic_int_inc (&progress->zipped);
            }
        } else if (S_ISLNK(dent->mode)) {
            if (archive_version_number() >= 3000001) {
                /* Symlink in zip archive is not supported in earlier version
                 * of libarchive */
                ret = add_file_to_archive (data, dirpath, dent->name, dent);
            }
        } else if (S_ISDIR(dent->mode)) {
            subpath = g_build_filename (dirpath, dent->name, NULL);
            ret = archive_dir (data, dent->id, subpath, progress);
            g_free (subpath);
        }
        if (ret < 0) {
            goto out;
        }
    }

out:
    if (dir)
        seaf_dir_free (dir);
    return ret;
}

/*
 * Allocate the per-task packing state: create the temporary zip file
 * under http_temp_dir, open a libarchive zip writer on it, and record
 * store/repo/crypt parameters. Returns NULL if the temp file cannot be
 * created. NOTE(review): archive_write_* return codes are not checked
 * here; errors surface later via archive_write_header/data.
 */
static PackDirData *
pack_dir_data_new (const char *store_id, int repo_version, const char *dirname,
                   SeafileCrypt *crypt, gboolean is_windows)
{
    struct archive *a = NULL;
    char *tmpfile_name = NULL;
    int fd = -1;
    PackDirData *data = NULL;

    /* g_mkstemp replaces the XXXXXX portion in place. */
    tmpfile_name = g_strdup_printf ("%s/seafile-XXXXXX.zip",
                                    seaf->http_server->http_temp_dir);
    fd = g_mkstemp (tmpfile_name);
    if (fd < 0) {
        seaf_warning ("Failed to open temp file: %s.\n", strerror (errno));
        g_free (tmpfile_name);
        return NULL;
    }

    /* Plain (store) zip: no compression filter. */
    a = archive_write_new ();
    archive_write_add_filter_none (a);
    archive_write_set_format_zip (a);
    archive_write_open_fd (a, fd);

    data = g_new0 (PackDirData, 1);
    data->crypt = crypt;
    data->is_windows = is_windows;
    data->a = a;
    data->top_dir_name = dirname;
    data->mtime = time(NULL);
    memcpy (data->store_id, store_id, 36);
    data->repo_version = repo_version;
    data->tmp_fd = fd;
    data->tmp_zip_file = tmpfile_name;

    return data;
}

/* Return TRUE if `filename` already appears in `file_list`
 * (linear scan, case-sensitive). */
static gboolean
name_exists (GList *file_list, const char *filename)
{
    GList *ptr;
    char *name;

    for (ptr =
file_list; ptr != NULL; ptr = ptr->next) {
        name = ptr->data;
        if (strcmp (name, filename) == 0)
            return TRUE;
    }

    return FALSE;
}

/*
 * Return a newly allocated name for @file that does not collide with any
 * name already in @file_list.  On collision, " (1)", " (2)", ... is inserted
 * before the extension (if any): "a.txt" -> "a (1).txt".
 * Caller owns the returned string.
 */
static char *
generate_unique_filename (const char *file, GList *file_list)
{
    int i = 1;
    char *name, *ext, *unique_name;

    unique_name = g_strdup(file);
    split_filename (unique_name, &name, &ext);
    while (name_exists (file_list, unique_name)) {
        g_free (unique_name);
        if (ext)
            unique_name = g_strdup_printf ("%s (%d).%s", name, i, ext);
        else
            unique_name = g_strdup_printf ("%s (%d)", name, i);
        i++;
    }

    g_free (name);
    g_free (ext);
    return unique_name;
}

/*
 * Archive a flat list of SeafDirent (files and directories) into the zip in
 * @data.  Entry names are de-duplicated with generate_unique_filename() so
 * same-named dirents from different folders do not clash in the archive.
 *
 * Returns 0 on success, -1 on error or cancellation.
 */
static int
archive_multi (PackDirData *data, GList *dirent_list,
               Progress *progress)
{
    GList *iter;
    SeafDirent *dirent;
    GList *file_list = NULL;

    for (iter = dirent_list; iter; iter = iter->next) {
        char *unique_name = NULL;
        if (progress->canceled) {
            string_list_free (file_list);
            return -1;
        }
        dirent = iter->data;
        if (S_ISREG(dirent->mode)) {
            unique_name = generate_unique_filename (dirent->name, file_list);
            /* file_list takes ownership of unique_name; freed by
             * string_list_free below. */
            file_list = g_list_prepend (file_list, unique_name);

            if (add_file_to_archive (data, "", unique_name, dirent) < 0) {
                string_list_free (file_list);
                seaf_warning ("Failed to archive file: %s.\n", dirent->name);
                return -1;
            }
            g_atomic_int_inc (&progress->zipped);
        } else if (S_ISDIR(dirent->mode)) {
            unique_name = generate_unique_filename (dirent->name, file_list);
            file_list = g_list_prepend (file_list, unique_name);

            if (archive_dir (data, dirent->id, unique_name, progress) < 0) {
                string_list_free (file_list);
                seaf_warning ("Failed to archive dir: %s.\n", dirent->name);
                return -1;
            }
        }
    }

    string_list_free (file_list);
    return 0;
}

/*
 * Entry point: pack either one directory (@dirname non-empty, @internal is
 * the dir's root id) or a list of dirents (@dirname == "", @internal is a
 * GList of SeafDirent) into a temporary zip file, reporting to @progress.
 */
int
pack_files (const char *store_id,
            int repo_version,
            const char *dirname,
            void *internal,
            SeafileCrypt *crypt,
            gboolean is_windows,
            Progress *progress)
{
    int ret = 0;
    PackDirData *data = NULL;

    data = pack_dir_data_new (store_id, repo_version, dirname,
                              crypt, is_windows);
    if (!data) {
        seaf_warning ("Failed to create pack dir data for %s.\n",
                      strcmp (dirname, "")==0 ?
"multi files" : dirname); return -1; } progress->zip_file_path = data->tmp_zip_file; if (strcmp (dirname, "") != 0) { // Pack dir if (archive_dir (data, (char *)internal, "", progress) < 0) { if (progress->canceled) seaf_warning ("Zip task for dir %s in repo %.8s canceled.\n", dirname, store_id); else seaf_warning ("Failed to archive dir %s in repo %.8s.\n", dirname, store_id); ret = -1; } } else { // Pack multi if (archive_multi (data, (GList *)internal, progress) < 0) { if (progress->canceled) seaf_warning ("Archiving multi files in repo %.8s canceled.\n", store_id); else seaf_warning ("Failed to archive multi files in repo %.8s.\n", store_id); ret = -1; } } if (archive_write_free (data->a) < 0) { seaf_warning ("Failed to archive write finish for %s in repo %.8s.\n", strcmp (dirname, "")==0 ? "multi files" : dirname, store_id); ret = -1; } close (data->tmp_fd); free (data); return ret; } #endif ================================================ FILE: server/pack-dir.h ================================================ #ifndef PACK_DIR_H #define PACK_DIR_H #ifdef HAVE_EVHTP /* Pack a seafile directory to a zipped archive, saved in a temporary file. Return the path of this temporary file. 
*/ typedef struct Progress { int zipped; int total; char *zip_file_path; gint64 expire_ts; gboolean canceled; gboolean size_too_large; gboolean internal_error; } Progress; int pack_files (const char *store_id, int repo_version, const char *dirname, void *internal, SeafileCrypt *crypt, gboolean is_windows, Progress *progress); #endif #endif ================================================ FILE: server/passwd-mgr.c ================================================ #include "common.h" #include "log.h" #include #include #include "seafile-session.h" #include "seafile-object.h" #include "seafile-error.h" #include "seafile-crypt.h" #include "utils.h" #define REAP_INTERVAL 60 #define REAP_THRESHOLD 3600 typedef struct { int enc_version; unsigned char key[32]; unsigned char iv[16]; guint64 expire_time; } DecryptKey; struct _SeafPasswdManagerPriv { GHashTable *decrypt_keys; CcnetTimer *reap_timer; }; static int reap_expired_passwd (void *vmgr); static void decrypt_key_free (DecryptKey *key) { if (!key) return; /* clear sensitive information */ memset (key->key, 0, sizeof(key->key)); memset (key->iv, 0, sizeof(key->iv)); g_free (key); } SeafPasswdManager * seaf_passwd_manager_new (struct _SeafileSession *session) { SeafPasswdManager *mgr = g_new0 (SeafPasswdManager, 1); mgr->session = session; mgr->priv = g_new0 (struct _SeafPasswdManagerPriv, 1); mgr->priv->decrypt_keys = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, (GDestroyNotify)decrypt_key_free); return mgr; } int seaf_passwd_manager_start (SeafPasswdManager *mgr) { mgr->priv->reap_timer = ccnet_timer_new (reap_expired_passwd, mgr, REAP_INTERVAL * 1000); return 1; } int seaf_passwd_manager_check_passwd (SeafPasswdManager *mgr, const char *repo_id, const char *magic, GError **error) { SeafRepo *repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo"); return -1; } if (!repo->encrypted) { seaf_repo_unref (repo); g_set_error 
(error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Repo is not encrypted"); return -1; } if (strcmp (magic, repo->magic) != 0) { seaf_repo_unref (repo); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Incorrect password"); return -1; } seaf_repo_unref (repo); return 0; } int seaf_passwd_manager_set_passwd (SeafPasswdManager *mgr, const char *repo_id, const char *user, const char *passwd, GError **error) { SeafRepo *repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); DecryptKey *crypt_key; GString *hash_key; if (!repo) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo"); return -1; } if (!repo->encrypted) { seaf_repo_unref (repo); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Repo is not encrypted"); return -1; } if (repo->enc_version != 1 && repo->enc_version != 2 && repo->enc_version != 3 && repo->enc_version != 4) { seaf_repo_unref (repo); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Unsupported encryption version"); return -1; } if (repo->pwd_hash_algo) { if (seafile_pwd_hash_verify_repo_passwd (repo->enc_version, repo->id, passwd, repo->salt, repo->pwd_hash, repo->pwd_hash_algo, repo->pwd_hash_params) < 0) { seaf_repo_unref (repo); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Incorrect password"); return -1; } } else { if (seafile_verify_repo_passwd (repo->id, passwd, repo->magic, repo->enc_version, repo->salt) < 0) { seaf_repo_unref (repo); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Incorrect password"); return -1; } } crypt_key = g_new0 (DecryptKey, 1); if (!crypt_key) { seaf_warning ("Failed to alloc crypt key struct.\n"); seaf_repo_unref (repo); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, "Internal server error"); return -1; } if (seafile_decrypt_repo_enc_key (repo->enc_version, passwd, repo->random_key, repo->salt, crypt_key->key, crypt_key->iv) < 0) { seaf_repo_unref (repo); g_free (crypt_key); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Incorrect password"); 
return -1;
    }

    /* Cache the decrypted key/iv in memory for REAP_THRESHOLD seconds,
     * keyed by "<repo_id>.<user>". */
    crypt_key->expire_time = (guint64)time(NULL) + REAP_THRESHOLD;
    crypt_key->enc_version = repo->enc_version;

    hash_key = g_string_new (NULL);
    g_string_printf (hash_key, "%s.%s", repo_id, user);

    /* g_debug ("[passwd mgr] Set passwd for %s\n", hash_key->str); */
    /* The hash table takes ownership of both the key string and crypt_key
     * (freed by g_free / decrypt_key_free respectively). */
    g_hash_table_insert (mgr->priv->decrypt_keys,
                         g_string_free (hash_key, FALSE),
                         crypt_key);
    seaf_repo_unref (repo);

    return 0;
}

/*
 * Drop the cached decryption key for (@repo_id, @user), if any.
 * Always returns 0.
 */
int
seaf_passwd_manager_unset_passwd (SeafPasswdManager *mgr,
                                  const char *repo_id,
                                  const char *user,
                                  GError **error)
{
    GString *hash_key;

    hash_key = g_string_new (NULL);
    g_string_printf (hash_key, "%s.%s", repo_id, user);
    g_hash_table_remove (mgr->priv->decrypt_keys, hash_key->str);
    g_string_free (hash_key, TRUE);

    return 0;
}

/*
 * Return TRUE if a (non-expired-yet) decryption key is cached for
 * (@repo_id, @user).
 */
gboolean
seaf_passwd_manager_is_passwd_set (SeafPasswdManager *mgr,
                                   const char *repo_id,
                                   const char *user)
{
    GString *key = g_string_new (NULL);
    gboolean ret = FALSE;

    g_string_printf (key, "%s.%s", repo_id, user);
    /* g_debug ("[passwd mgr] check passwd for %s\n", key->str); */
    if (g_hash_table_lookup (mgr->priv->decrypt_keys, key->str) != NULL)
        ret = TRUE;

    g_string_free (key, TRUE);

    return ret;
}

/*
 * Return the cached decryption key for (@repo_id, @user) as a
 * SeafileCryptKey with hex-encoded "key"/"iv" properties, or NULL if no key
 * is cached.  enc_version 1 uses a 16-byte key; version >= 2 a 32-byte key;
 * the iv is always 16 bytes.
 */
SeafileCryptKey *
seaf_passwd_manager_get_decrypt_key (SeafPasswdManager *mgr,
                                     const char *repo_id,
                                     const char *user)
{
    GString *hash_key;
    DecryptKey *crypt_key;
    SeafileCryptKey *ret;
    char key_hex[65], iv_hex[65];

    hash_key = g_string_new (NULL);
    g_string_printf (hash_key, "%s.%s", repo_id, user);

    /* g_debug ("[passwd mgr] get passwd for %s.\n", hash_key->str); */
    crypt_key = g_hash_table_lookup (mgr->priv->decrypt_keys,
                                     hash_key->str);
    if (!crypt_key) {
        g_string_free (hash_key, TRUE);
        return NULL;
    }

    if (crypt_key->enc_version >= 2) {
        rawdata_to_hex (crypt_key->key, key_hex, 32);
        rawdata_to_hex (crypt_key->iv, iv_hex, 16);
    } else if (crypt_key->enc_version == 1) {
        rawdata_to_hex (crypt_key->key, key_hex, 16);
        rawdata_to_hex (crypt_key->iv, iv_hex, 16);
    }

    ret = seafile_crypt_key_new ();
    g_object_set (ret, "key", key_hex, "iv", iv_hex, NULL);

    g_string_free
(hash_key, TRUE); return ret; } int seaf_passwd_manager_get_decrypt_key_raw (SeafPasswdManager *mgr, const char *repo_id, const char *user, unsigned char *key_out, unsigned char *iv_out) { GString *hash_key; DecryptKey *crypt_key; hash_key = g_string_new (NULL); g_string_printf (hash_key, "%s.%s", repo_id, user); crypt_key = g_hash_table_lookup (mgr->priv->decrypt_keys, hash_key->str); if (!crypt_key) { g_string_free (hash_key, TRUE); return -1; } g_string_free (hash_key, TRUE); if (crypt_key->enc_version == 1) { memcpy (key_out, crypt_key->key, 16); memcpy (iv_out, crypt_key->iv, 16); } else if (crypt_key->enc_version >= 2) { memcpy (key_out, crypt_key->key, 32); memcpy (iv_out, crypt_key->iv, 16); } return 0; } static int reap_expired_passwd (void *vmgr) { SeafPasswdManager *mgr = vmgr; GHashTableIter iter; gpointer key, value; DecryptKey *crypt_key; guint64 now = (guint64)time(NULL); g_hash_table_iter_init (&iter, mgr->priv->decrypt_keys); while (g_hash_table_iter_next (&iter, &key, &value)) { crypt_key = value; if (crypt_key->expire_time <= now) { /* g_debug ("[passwd mgr] Remove passwd for %s\n", (char *)key); */ g_hash_table_iter_remove (&iter); } } return 1; } ================================================ FILE: server/passwd-mgr.h ================================================ #ifndef PASSWD_MGR_H #define PASSWD_MGR_H #include struct _SeafileSession; struct _SeafPasswdManagerPriv; struct _SeafileCryptKey; struct _SeafPasswdManager { struct _SeafileSession *session; struct _SeafPasswdManagerPriv *priv; }; typedef struct _SeafPasswdManager SeafPasswdManager; SeafPasswdManager * seaf_passwd_manager_new (struct _SeafileSession *session); int seaf_passwd_manager_start (SeafPasswdManager *mgr); /** * Check password @magic to access contents of @repo_id. * This function: * 1. check whether @magic is correct; * * Returns 0 if password @magic is correct, -1 otherwise. 
*/ int seaf_passwd_manager_check_passwd (SeafPasswdManager *mgr, const char *repo_id, const char *magic, GError **error); /** * Set @passwd for @user to access contents of @repo_id. * This function: * 1. check whether @passwd is correct; * 2. calculate and store decryption key based on @passwd in memory. * * Returns 0 if @passwd is correct, -1 otherwise. */ int seaf_passwd_manager_set_passwd (SeafPasswdManager *mgr, const char *repo_id, const char *user, const char *passwd, GError **error); /** * Returns 0 if successfully unset user password, -1 otherwise. */ int seaf_passwd_manager_unset_passwd (SeafPasswdManager *mgr, const char *repo_id, const char *user, GError **error); /** * Check whether correct passwd has been set for @user * to access @repo_id. */ gboolean seaf_passwd_manager_is_passwd_set (SeafPasswdManager *mgr, const char *repo_id, const char *user); /** * Returns decryption key for @repo_id, NULL if it's not set. */ struct _SeafileCryptKey * seaf_passwd_manager_get_decrypt_key (SeafPasswdManager *mgr, const char *repo_id, const char *user); int seaf_passwd_manager_get_decrypt_key_raw (SeafPasswdManager *mgr, const char *repo_id, const char *user, unsigned char *key_out, unsigned char *iv_out); #endif ================================================ FILE: server/permission-mgr.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include "common.h" #include "db.h" #include "seafile-session.h" #include "permission-mgr.h" #define PERM_DB "perm.db" struct _SeafPermManagerPriv { sqlite3 *db; }; static int load_db (SeafPermManager *mgr); SeafPermManager * seaf_perm_manager_new (SeafileSession *seaf) { SeafPermManager *mgr = g_new0 (SeafPermManager, 1); mgr->priv = g_new0 (SeafPermManagerPriv, 1); mgr->seaf = seaf; return mgr; } int seaf_perm_manager_init (SeafPermManager *mgr) { return load_db (mgr); } static int load_db (SeafPermManager *mgr) { char *db_path = g_build_filename 
(mgr->seaf->seaf_dir, PERM_DB, NULL); if (sqlite_open_db (db_path, &mgr->priv->db) < 0) { g_critical ("[Permission mgr] Failed to open permission db\n"); g_free (db_path); g_free (mgr); return -1; } g_free (db_path); const char *sql; return 0; } ================================================ FILE: server/permission-mgr.h ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef SEAF_PERM_MGR_H #define SEAF_PERM_MGR_H #include struct _SeafileSession; typedef struct _SeafPermManager SeafPermManager; typedef struct _SeafPermManagerPriv SeafPermManagerPriv; struct _SeafPermManager { struct _SeafileSession *seaf; SeafPermManagerPriv *priv; }; SeafPermManager* seaf_perm_manager_new (struct _SeafileSession *seaf); int seaf_perm_manager_init (SeafPermManager *mgr); int seaf_perm_manager_set_repo_owner (SeafPermManager *mgr, const char *repo_id, const char *user_id); char * seaf_perm_manager_get_repo_owner (SeafPermManager *mgr, const char *repo_id); /* TODO: add start and limit. */ /* Get repos owned by this user. 
*/ GList * seaf_perm_manager_get_repos_by_owner (SeafPermManager *mgr, const char *user_id); #endif ================================================ FILE: server/quota-mgr.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include "common.h" #define DEBUG_FLAG SEAFILE_DEBUG_OTHER #include "log.h" #include "utils.h" #include "seafile-session.h" #include "seaf-db.h" #include "quota-mgr.h" #include "seaf-utils.h" #define KB 1000L #define MB 1000000L #define GB 1000000000L #define TB 1000000000000L static gint64 get_default_quota (SeafCfgManager *mgr) { char *quota_str; char *end; gint64 quota_int; gint64 multiplier = GB; gint64 quota; quota_str = seaf_cfg_manager_get_config_string (mgr, "quota", "default"); if (!quota_str) return INFINITE_QUOTA; quota_int = strtoll (quota_str, &end, 10); if (quota_int == LLONG_MIN || quota_int == LLONG_MAX) { seaf_warning ("Default quota value out of range. Use unlimited.\n"); quota = INFINITE_QUOTA; goto out; } if (*end != '\0') { if (strcasecmp(end, "kb") == 0 || strcasecmp(end, "k") == 0) multiplier = KB; else if (strcasecmp(end, "mb") == 0 || strcasecmp(end, "m") == 0) multiplier = MB; else if (strcasecmp(end, "gb") == 0 || strcasecmp(end, "g") == 0) multiplier = GB; else if (strcasecmp(end, "tb") == 0 || strcasecmp(end, "t") == 0) multiplier = TB; else { seaf_warning ("Invalid default quota format %s. 
Use unlimited.\n", quota_str); quota = INFINITE_QUOTA; goto out; } } quota = quota_int * multiplier; out: g_free (quota_str); return quota; } SeafQuotaManager * seaf_quota_manager_new (struct _SeafileSession *session) { SeafQuotaManager *mgr = g_new0 (SeafQuotaManager, 1); if (!mgr) return NULL; mgr->session = session; mgr->calc_share_usage = g_key_file_get_boolean (session->config, "quota", "calc_share_usage", NULL); return mgr; } int seaf_quota_manager_init (SeafQuotaManager *mgr) { if (!mgr->session->create_tables && seaf_db_type (mgr->session->db) != SEAF_DB_TYPE_PGSQL) return 0; SeafDB *db = mgr->session->db; const char *sql; switch (seaf_db_type(db)) { case SEAF_DB_TYPE_PGSQL: sql = "CREATE TABLE IF NOT EXISTS UserQuota (\"user\" VARCHAR(255) PRIMARY KEY," "quota BIGINT)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS UserShareQuota (\"user\" VARCHAR(255) PRIMARY KEY," "quota BIGINT)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS OrgQuota (org_id INTEGER PRIMARY KEY," "quota BIGINT)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS OrgUserQuota (org_id INTEGER," "\"user\" VARCHAR(255), quota BIGINT, PRIMARY KEY (org_id, \"user\"))"; if (seaf_db_query (db, sql) < 0) return -1; break; case SEAF_DB_TYPE_SQLITE: sql = "CREATE TABLE IF NOT EXISTS UserQuota (user VARCHAR(255) PRIMARY KEY," "quota BIGINT)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS UserShareQuota (user VARCHAR(255) PRIMARY KEY," "quota BIGINT)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS OrgQuota (org_id INTEGER PRIMARY KEY," "quota BIGINT)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS OrgUserQuota (org_id INTEGER," "user VARCHAR(255), quota BIGINT, PRIMARY KEY (org_id, user))"; if (seaf_db_query (db, sql) < 0) return -1; break; case SEAF_DB_TYPE_MYSQL: sql = "CREATE TABLE IF NOT EXISTS UserQuota (id 
BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, " "user VARCHAR(255)," "quota BIGINT, UNIQUE INDEX(user)) ENGINE=INNODB"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS UserShareQuota (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, " "user VARCHAR(255)," "quota BIGINT, UNIQUE INDEX(user)) ENGINE=INNODB"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS OrgQuota (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, " "org_id INTEGER," "quota BIGINT, UNIQUE INDEX(org_id)) ENGINE=INNODB"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS OrgUserQuota (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, " "org_id INTEGER," "user VARCHAR(255), quota BIGINT, UNIQUE INDEX(org_id, user))" "ENGINE=INNODB"; if (seaf_db_query (db, sql) < 0) return -1; break; } return 0; } int seaf_quota_manager_set_user_quota (SeafQuotaManager *mgr, const char *user, gint64 quota) { SeafDB *db = mgr->session->db; if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) { gboolean exists, err; int rc; exists = seaf_db_statement_exists (db, "SELECT 1 FROM UserQuota WHERE \"user\"=?", &err, 1, "string", user); if (err) return -1; if (exists) rc = seaf_db_statement_query (db, "UPDATE UserQuota SET quota=? 
" "WHERE \"user\"=?", 2, "int64", quota, "string", user); else rc = seaf_db_statement_query (db, "INSERT INTO UserQuota (\"user\", quota) VALUES " "(?, ?)", 2, "string", user, "int64", quota); return rc; } else { int rc; rc = seaf_db_statement_query (db, "REPLACE INTO UserQuota (user, quota) VALUES (?, ?)", 2, "string", user, "int64", quota); return rc; } } gint64 seaf_quota_manager_get_user_quota (SeafQuotaManager *mgr, const char *user) { char *sql; gint64 quota; if (seaf_db_type(mgr->session->db) != SEAF_DB_TYPE_PGSQL) sql = "SELECT quota FROM UserQuota WHERE user=?"; else sql = "SELECT quota FROM UserQuota WHERE \"user\"=?"; quota = seaf_db_statement_get_int64 (mgr->session->db, sql, 1, "string", user); if (quota <= 0) quota = get_default_quota (seaf->cfg_mgr); return quota; } int seaf_quota_manager_set_org_quota (SeafQuotaManager *mgr, int org_id, gint64 quota) { SeafDB *db = mgr->session->db; if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) { gboolean exists, err; int rc; exists = seaf_db_statement_exists (db, "SELECT 1 FROM OrgQuota WHERE org_id=?", &err, 1, "int", org_id); if (err) return -1; if (exists) rc = seaf_db_statement_query (db, "UPDATE OrgQuota SET quota=? 
WHERE org_id=?", 2, "int64", quota, "int", org_id); else rc = seaf_db_statement_query (db, "INSERT INTO OrgQuota (org_id, quota) VALUES (?, ?)", 2, "int", org_id, "int64", quota); return rc; } else { int rc = seaf_db_statement_query (db, "REPLACE INTO OrgQuota (org_id, quota) VALUES (?, ?)", 2, "int", org_id, "int64", quota); return rc; } } gint64 seaf_quota_manager_get_org_quota (SeafQuotaManager *mgr, int org_id) { char *sql; gint64 quota; sql = "SELECT quota FROM OrgQuota WHERE org_id=?"; quota = seaf_db_statement_get_int64 (mgr->session->db, sql, 1, "int", org_id); if (quota <= 0) quota = get_default_quota (seaf->cfg_mgr); return quota; } int seaf_quota_manager_set_org_user_quota (SeafQuotaManager *mgr, int org_id, const char *user, gint64 quota) { SeafDB *db = mgr->session->db; int rc; if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) { gboolean exists, err; exists = seaf_db_statement_exists (db, "SELECT 1 FROM OrgUserQuota " "WHERE org_id=? AND \"user\"=?", &err, 2, "int", org_id, "string", user); if (err) return -1; if (exists) rc = seaf_db_statement_query (db, "UPDATE OrgUserQuota SET quota=?" " WHERE org_id=? AND \"user\"=?", 3, "int64", quota, "int", org_id, "string", user); else rc = seaf_db_statement_query (db, "INSERT INTO OrgUserQuota (org_id, \"user\", quota) VALUES " "(?, ?, ?)", 3, "int", org_id, "string", user, "int64", quota); return rc; } else { rc = seaf_db_statement_query (db, "REPLACE INTO OrgUserQuota (org_id, user, quota) VALUES (?, ?, ?)", 3, "int", org_id, "string", user, "int64", quota); return rc; } } gint64 seaf_quota_manager_get_org_user_quota (SeafQuotaManager *mgr, int org_id, const char *user) { char *sql; gint64 quota; if (seaf_db_type(mgr->session->db) != SEAF_DB_TYPE_PGSQL) sql = "SELECT quota FROM OrgUserQuota WHERE org_id=? AND user=?"; else sql = "SELECT quota FROM OrgUserQuota WHERE org_id=? 
AND \"user\"=?"; quota = seaf_db_statement_get_int64 (mgr->session->db, sql, 2, "int", org_id, "string", user); /* return org quota if per user quota is not set. */ if (quota <= 0) quota = seaf_quota_manager_get_org_quota (mgr, org_id); return quota; } static void count_group_members (GHashTable *user_hash, GList *members) { GList *p; CcnetGroupUser *user; const char *user_name; int dummy; for (p = members; p; p = p->next) { user = p->data; user_name = ccnet_group_user_get_user_name (user); g_hash_table_insert (user_hash, g_strdup(user_name), &dummy); /* seaf_debug ("Shared to %s.\n", user_name); */ g_object_unref (user); } g_list_free (members); } static gint get_num_shared_to (const char *user, const char *repo_id) { GHashTable *user_hash; int dummy; GList *personal = NULL, *groups = NULL, *members = NULL, *p; gint n_shared_to = -1; /* seaf_debug ("Computing share usage for repo %s.\n", repo_id); */ /* If a repo is shared to both a user and a group, and that user is also * a member of the group, we don't want to count that user twice. * This also applies to two groups with overlapped members. * So we have to use a hash table to filter out duplicated users. */ user_hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); /* First count personal share */ personal = seaf_share_manager_list_shared_to (seaf->share_mgr, user, repo_id); for (p = personal; p; p = p->next) { char *email = p->data; g_hash_table_insert (user_hash, g_strdup(email), &dummy); /* seaf_debug ("Shared to %s.\n", email); */ } /* Then groups... */ groups = seaf_repo_manager_get_groups_by_repo (seaf->repo_mgr, repo_id, NULL); for (p = groups; p; p = p->next) { members = ccnet_group_manager_get_group_members (seaf->group_mgr, (int)(long)p->data, -1, -1, NULL); if (!members) { seaf_warning ("Cannot get member list for groupd %d.\n", (int)(long)p->data); goto out; } count_group_members (user_hash, members); } /* Remove myself if i'm in a group. 
*/
    g_hash_table_remove (user_hash, user);

    n_shared_to = g_hash_table_size(user_hash);
    /* seaf_debug ("n_shared_to = %u.\n", n_shared_to); */

out:
    g_hash_table_destroy (user_hash);
    string_list_free (personal);
    g_list_free (groups);

    return n_shared_to;
}

/*
 * Check whether adding @delta bytes to @repo_id's owner would hit the
 * owner's quota.  For a virtual repo, the quota of the origin repo's owner
 * is checked instead.
 *
 * Returns 0 when within quota, 1 when (usage + delta) >= quota,
 * -1 on internal error.
 *
 * NOTE(review): owners whose name contains "dtable@seafile" are exempted
 * from quota checks — presumably internal/system accounts; confirm.
 */
int
seaf_quota_manager_check_quota_with_delta (SeafQuotaManager *mgr,
                                           const char *repo_id,
                                           gint64 delta)
{
    SeafVirtRepo *vinfo;
    const char *r_repo_id = repo_id;
    char *user = NULL;
    gint64 quota, usage;
    int ret = 0;

    /* If it's a virtual repo, check quota to origin repo. */
    vinfo = seaf_repo_manager_get_virtual_repo_info (seaf->repo_mgr, repo_id);
    if (vinfo)
        r_repo_id = vinfo->origin_repo_id;

    user = seaf_repo_manager_get_repo_owner (seaf->repo_mgr, r_repo_id);
    if (user != NULL) {
        if (g_strrstr (user, "dtable@seafile") != NULL)
            goto out;
        quota = seaf_quota_manager_get_user_quota (mgr, user);
    } else {
        seaf_warning ("Repo %s has no owner.\n", r_repo_id);
        ret = -1;
        goto out;
    }

    if (quota == INFINITE_QUOTA)
        goto out;

    usage = seaf_quota_manager_get_user_usage (mgr, user);
    if (usage < 0) {
        ret = -1;
        goto out;
    }

    if (delta != 0) {
        usage += delta;
    }
    if (usage >= quota) {
        ret = 1;
    }

out:
    seaf_virtual_repo_info_free (vinfo);
    g_free (user);
    return ret;
}

/*
 * Convenience wrapper: check current usage (delta == 0) and map "over
 * quota" (1) to -1, so callers get 0 = ok, -1 = over quota or error.
 */
int
seaf_quota_manager_check_quota (SeafQuotaManager *mgr,
                                const char *repo_id)
{
    int ret = seaf_quota_manager_check_quota_with_delta (mgr, repo_id, 0);
    if (ret == 1) {
        return -1;
    }

    return ret;
}

/*
 * Sum the sizes of all repos owned by @user, excluding virtual repos
 * (the LEFT JOIN / IS NULL filters them out so a virtual repo's bytes are
 * not double-counted against the owner).
 */
gint64
seaf_quota_manager_get_user_usage (SeafQuotaManager *mgr, const char *user)
{
    char *sql;

    sql = "SELECT SUM(size) FROM "
          "RepoOwner o LEFT JOIN VirtualRepo v ON o.repo_id=v.repo_id, "
          "RepoSize WHERE "
          "owner_id=? AND o.repo_id=RepoSize.repo_id "
          "AND v.repo_id IS NULL";

    return seaf_db_statement_get_int64 (mgr->session->db, sql,
                                        1, "string", user);

    /* Add size of repos in trash.
*/ /* sql = "SELECT size FROM RepoTrash WHERE owner_id = ?"; */ /* if (seaf_db_statement_foreach_row (mgr->session->db, sql, */ /* get_total_size, &total, */ /* 1, "string", user) < 0) */ /* return -1; */ } static gint64 repo_share_usage (const char *user, const char *repo_id) { gint n_shared_to = get_num_shared_to (user, repo_id); if (n_shared_to < 0) { return -1; } else if (n_shared_to == 0) { return 0; } gint64 size = seaf_repo_manager_get_repo_size (seaf->repo_mgr, repo_id); if (size < 0) { seaf_warning ("Cannot get size of repo %s.\n", repo_id); return -1; } /* share_usage = repo_size * n_shared_to */ gint64 usage = size * n_shared_to; return usage; } gint64 seaf_quota_manager_get_user_share_usage (SeafQuotaManager *mgr, const char *user) { GList *repos, *p; char *repo_id; gint64 total = 0, per_repo; repos = seaf_repo_manager_get_repo_ids_by_owner (seaf->repo_mgr, user); for (p = repos; p != NULL; p = p->next) { repo_id = p->data; per_repo = repo_share_usage (user, repo_id); if (per_repo < 0) { seaf_warning ("Failed to get repo %s share usage.\n", repo_id); string_list_free (repos); return -1; } total += per_repo; } string_list_free (repos); return total; } gint64 seaf_quota_manager_get_org_usage (SeafQuotaManager *mgr, int org_id) { char *sql; sql = "SELECT SUM(size) FROM OrgRepo, RepoSize WHERE " "org_id=? AND OrgRepo.repo_id=RepoSize.repo_id"; return seaf_db_statement_get_int64 (mgr->session->db, sql, 1, "int", org_id); } gint64 seaf_quota_manager_get_org_user_usage (SeafQuotaManager *mgr, int org_id, const char *user) { char *sql; sql = "SELECT SUM(size) FROM OrgRepo, RepoSize WHERE " "org_id=? AND user = ? 
AND OrgRepo.repo_id=RepoSize.repo_id"; return seaf_db_statement_get_int64 (mgr->session->db, sql, 2, "int", org_id, "string", user); } static gboolean collect_user_and_usage (SeafDBRow *row, void *data) { GList **p = data; const char *user; gint64 usage; user = seaf_db_row_get_column_text (row, 0); usage = seaf_db_row_get_column_int64 (row, 1); if (!user) return TRUE; SeafileUserQuotaUsage *user_usage= g_object_new (SEAFILE_TYPE_USER_QUOTA_USAGE, "user", user, "usage", usage, NULL); if (!user_usage) return FALSE; *p = g_list_prepend (*p, user_usage); return TRUE; } GList * seaf_repo_quota_manager_list_user_quota_usage (SeafQuotaManager *mgr) { GList *ret = NULL; char *sql = NULL; sql = "SELECT owner_id,SUM(size) FROM " "RepoOwner o LEFT JOIN VirtualRepo v ON o.repo_id=v.repo_id, " "RepoSize WHERE " "o.repo_id=RepoSize.repo_id " "AND v.repo_id IS NULL " "GROUP BY owner_id"; if (seaf_db_statement_foreach_row (mgr->session->db, sql, collect_user_and_usage, &ret, 0) < 0) { g_list_free_full (ret, g_object_unref); return NULL; } return g_list_reverse (ret); } ================================================ FILE: server/quota-mgr.h ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef QUOTA_MGR_H #define QUOTA_MGR_H #define INFINITE_QUOTA (gint64)-2 struct _SeafQuotaManager { struct _SeafileSession *session; gboolean calc_share_usage; }; typedef struct _SeafQuotaManager SeafQuotaManager; SeafQuotaManager * seaf_quota_manager_new (struct _SeafileSession *session); int seaf_quota_manager_init (SeafQuotaManager *mgr); /* Set/get quota for a personal account. */ int seaf_quota_manager_set_user_quota (SeafQuotaManager *mgr, const char *user, gint64 quota); gint64 seaf_quota_manager_get_user_quota (SeafQuotaManager *mgr, const char *user); gint64 seaf_quota_manager_get_user_share_usage (SeafQuotaManager *mgr, const char *user); /* * Check if @repo_id still has free space for upload. 
*/ int seaf_quota_manager_check_quota (SeafQuotaManager *mgr, const char *repo_id); // ret = 0 means doesn't exceed quota, // 1 means exceed quota, // -1 means internal error int seaf_quota_manager_check_quota_with_delta (SeafQuotaManager *mgr, const char *repo_id, gint64 delta); gint64 seaf_quota_manager_get_user_usage (SeafQuotaManager *mgr, const char *user); GList * seaf_repo_quota_manager_list_user_quota_usage (SeafQuotaManager *mgr); #endif ================================================ FILE: server/repo-mgr.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include "common.h" #include #include #include #include #include "utils.h" #include "log.h" #include "seafile-session.h" #include "commit-mgr.h" #include "branch-mgr.h" #include "repo-mgr.h" #include "fs-mgr.h" #include "seafile-error.h" #include "seafile-crypt.h" #include "password-hash.h" #include "seaf-db.h" #include "seaf-utils.h" #define REAP_TOKEN_INTERVAL 300 /* 5 mins */ #define DECRYPTED_TOKEN_TTL 3600 /* 1 hour */ #define SCAN_TRASH_DAYS 1 /* one day */ #define TRASH_EXPIRE_DAYS 30 /* one month */ typedef struct DecryptedToken { char *token; gint64 reap_time; } DecryptedToken; struct _SeafRepoManagerPriv { /* (encrypted_token, session_key) -> decrypted token */ GHashTable *decrypted_tokens; pthread_rwlock_t lock; CcnetTimer *reap_token_timer; CcnetTimer *scan_trash_timer; }; static void load_repo (SeafRepoManager *manager, SeafRepo *repo); static int create_db_tables_if_not_exist (SeafRepoManager *mgr); static int save_branch_repo_map (SeafRepoManager *manager, SeafBranch *branch); static int reap_token (void *data); static void decrypted_token_free (DecryptedToken *token); gboolean is_repo_id_valid (const char *id) { if (!id) return FALSE; return is_uuid_valid (id); } SeafRepo* seaf_repo_new (const char *id, const char *name, const char *desc) { SeafRepo* repo; /* valid check */ repo = g_new0 (SeafRepo, 1); memcpy 
(repo->id, id, 36);
    repo->id[36] = '\0';

    repo->name = g_strdup(name);
    repo->desc = g_strdup(desc);

    repo->ref_cnt = 1;

    return repo;
}

/* Release a SeafRepo and all strings/objects it owns.  Call via
 * seaf_repo_unref(); only invoked when the refcount drops to zero. */
void
seaf_repo_free (SeafRepo *repo)
{
    if (repo->name) g_free (repo->name);
    if (repo->desc) g_free (repo->desc);
    if (repo->head) seaf_branch_unref (repo->head);
    if (repo->virtual_info)
        seaf_virtual_repo_info_free (repo->virtual_info);
    g_free (repo->last_modifier);
    g_free (repo->pwd_hash_algo);
    g_free (repo->pwd_hash_params);
    g_free (repo->type);
    g_free (repo);
}

/* Atomically take an additional reference on @repo. */
void
seaf_repo_ref (SeafRepo *repo)
{
    g_atomic_int_inc (&repo->ref_cnt);
}

/* Atomically drop a reference; frees @repo when the count reaches zero.
 * NULL-safe. */
void
seaf_repo_unref (SeafRepo *repo)
{
    if (!repo)
        return;

    if (g_atomic_int_dec_and_test (&repo->ref_cnt))
        seaf_repo_free (repo);
}

/* Replace repo->head with @branch, transferring the old head's reference
 * and taking a new one on @branch. */
static void
set_head_common (SeafRepo *repo, SeafBranch *branch)
{
    if (repo->head)
        seaf_branch_unref (repo->head);
    repo->head = branch;
    seaf_branch_ref(branch);
}

/* Persist the branch->repo mapping, then set @branch as the in-memory head.
 * Returns -1 (head unchanged) if the mapping cannot be saved. */
int
seaf_repo_set_head (SeafRepo *repo, SeafBranch *branch)
{
    if (save_branch_repo_map (repo->manager, branch) < 0)
        return -1;
    set_head_common (repo, branch);
    return 0;
}

/*
 * Populate @repo's metadata fields from @commit.  For encrypted repos the
 * fields copied depend on enc_version: v1 has a 32-char magic; v2+ carry a
 * 96-char random_key; v3/v4 add a 64-char salt.  When the commit records a
 * password hash algorithm, pwd_hash/pwd_hash_algo/pwd_hash_params are used
 * instead of the legacy magic.
 */
void
seaf_repo_from_commit (SeafRepo *repo, SeafCommit *commit)
{
    repo->name = g_strdup (commit->repo_name);
    repo->desc = g_strdup (commit->repo_desc);
    repo->encrypted = commit->encrypted;
    repo->repaired = commit->repaired;
    repo->last_modify = commit->ctime;
    memcpy (repo->root_id, commit->root_id, 40);
    if (repo->encrypted) {
        repo->enc_version = commit->enc_version;
        if (repo->enc_version == 1 && !commit->pwd_hash_algo)
            memcpy (repo->magic, commit->magic, 32);
        else if (repo->enc_version == 2) {
            memcpy (repo->random_key, commit->random_key, 96);
        } else if (repo->enc_version == 3) {
            memcpy (repo->random_key, commit->random_key, 96);
            memcpy (repo->salt, commit->salt, 64);
        } else if (repo->enc_version == 4) {
            memcpy (repo->random_key, commit->random_key, 96);
            memcpy (repo->salt, commit->salt, 64);
        }
        if (repo->enc_version >= 2 && !commit->pwd_hash_algo) {
            memcpy (repo->magic, commit->magic, 64);
        }
        if (commit->pwd_hash_algo) {
            memcpy (repo->pwd_hash,
commit->pwd_hash, 64); repo->pwd_hash_algo = g_strdup (commit->pwd_hash_algo); repo->pwd_hash_params = g_strdup (commit->pwd_hash_params); } } repo->no_local_history = commit->no_local_history; repo->version = commit->version; repo->last_modifier = g_strdup (commit->creator_name); } void seaf_repo_to_commit (SeafRepo *repo, SeafCommit *commit) { commit->repo_name = g_strdup (repo->name); commit->repo_desc = g_strdup (repo->desc); commit->encrypted = repo->encrypted; commit->repaired = repo->repaired; if (commit->encrypted) { commit->enc_version = repo->enc_version; if (commit->enc_version == 1 && !repo->pwd_hash_algo) commit->magic = g_strdup (repo->magic); else if (commit->enc_version == 2) { commit->random_key = g_strdup (repo->random_key); } else if (commit->enc_version == 3) { commit->random_key = g_strdup (repo->random_key); commit->salt = g_strdup (repo->salt); } else if (commit->enc_version == 4) { commit->random_key = g_strdup (repo->random_key); commit->salt = g_strdup (repo->salt); } if (commit->enc_version >= 2 && !repo->pwd_hash_algo) { commit->magic = g_strdup (repo->magic); } if (repo->pwd_hash_algo) { commit->pwd_hash = g_strdup (repo->pwd_hash); commit->pwd_hash_algo = g_strdup (repo->pwd_hash_algo); commit->pwd_hash_params = g_strdup (repo->pwd_hash_params); } } commit->no_local_history = repo->no_local_history; commit->version = repo->version; } static gboolean collect_commit (SeafCommit *commit, void *vlist, gboolean *stop) { GList **commits = vlist; /* The traverse function will unref the commit, so we need to ref it. 
*/ seaf_commit_ref (commit); *commits = g_list_prepend (*commits, commit); return TRUE; } GList * seaf_repo_get_commits (SeafRepo *repo) { GList *branches; GList *ptr; SeafBranch *branch; GList *commits = NULL; branches = seaf_branch_manager_get_branch_list (seaf->branch_mgr, repo->id); if (branches == NULL) { seaf_warning ("Failed to get branch list of repo %s.\n", repo->id); return NULL; } for (ptr = branches; ptr != NULL; ptr = ptr->next) { branch = ptr->data; gboolean res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr, repo->id, repo->version, branch->commit_id, collect_commit, &commits, FALSE); if (!res) { for (ptr = commits; ptr != NULL; ptr = ptr->next) seaf_commit_unref ((SeafCommit *)(ptr->data)); g_list_free (commits); goto out; } } commits = g_list_reverse (commits); out: for (ptr = branches; ptr != NULL; ptr = ptr->next) { seaf_branch_unref ((SeafBranch *)ptr->data); } return commits; } gboolean should_ignore_file(const char *filename, void *data) { /* GPatternSpec **spec = ignore_patterns; */ char **components = g_strsplit (filename, "/", -1); int n_comps = g_strv_length (components); int j = 0; char *file_name; for (; j < n_comps; ++j) { file_name = components[j]; if (g_strcmp0(file_name, "..") == 0) { g_strfreev (components); return TRUE; } } g_strfreev (components); if (!g_utf8_validate (filename, -1, NULL)) { seaf_warning ("File name %s contains non-UTF8 characters, skip.\n", filename); return TRUE; } /* Ignore file/dir if its name is too long. 
*/ if (strlen(filename) >= SEAF_DIR_NAME_LEN) return TRUE; if (strchr (filename, '/')) return TRUE; return FALSE; } static gboolean collect_repo_id (SeafDBRow *row, void *data); static int scan_trash (void *data) { GList *repo_ids = NULL; SeafRepoManager *mgr = seaf->repo_mgr; gint64 trash_expire_interval = TRASH_EXPIRE_DAYS * 24 * 3600; int expire_days = seaf_cfg_manager_get_config_int (seaf->cfg_mgr, "library_trash", "expire_days"); if (expire_days > 0) { trash_expire_interval = expire_days * 24 * 3600; } gint64 expire_time = time(NULL) - trash_expire_interval; char *sql = "SELECT repo_id FROM RepoTrash WHERE del_time <= ?"; int ret = seaf_db_statement_foreach_row (seaf->db, sql, collect_repo_id, &repo_ids, 1, "int64", expire_time); if (ret < 0) { seaf_warning ("Get expired repo from trash failed."); string_list_free (repo_ids); return TRUE; } GList *iter; char *repo_id; for (iter=repo_ids; iter; iter=iter->next) { repo_id = iter->data; ret = seaf_repo_manager_del_repo_from_trash (mgr, repo_id, NULL); if (ret < 0) break; } string_list_free (repo_ids); return TRUE; } static void init_scan_trash_timer (SeafRepoManagerPriv *priv, GKeyFile *config) { int scan_days; GError *error = NULL; scan_days = g_key_file_get_integer (config, "library_trash", "scan_days", &error); if (error) { scan_days = SCAN_TRASH_DAYS; g_clear_error (&error); } priv->scan_trash_timer = ccnet_timer_new (scan_trash, NULL, scan_days * 24 * 3600 * 1000); } SeafRepoManager* seaf_repo_manager_new (SeafileSession *seaf) { SeafRepoManager *mgr = g_new0 (SeafRepoManager, 1); mgr->priv = g_new0 (SeafRepoManagerPriv, 1); mgr->seaf = seaf; mgr->priv->decrypted_tokens = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, (GDestroyNotify)decrypted_token_free); pthread_rwlock_init (&mgr->priv->lock, NULL); mgr->priv->reap_token_timer = ccnet_timer_new (reap_token, mgr, REAP_TOKEN_INTERVAL * 1000); init_scan_trash_timer (mgr->priv, seaf->config); return mgr; } int seaf_repo_manager_init (SeafRepoManager 
int
seaf_repo_manager_start (SeafRepoManager *mgr)
{
    return 0;
}

/* Register a new repo id in the Repo table and attach the manager.
 * Returns 0 on success, -1 on DB error. */
int
seaf_repo_manager_add_repo (SeafRepoManager *manager,
                            SeafRepo *repo)
{
    SeafDB *db = manager->seaf->db;

    if (seaf_db_statement_query (db, "INSERT INTO Repo (repo_id) VALUES (?)",
                                 1, "string", repo->id) < 0)
        return -1;

    repo->manager = manager;

    return 0;
}

/* Mark repo_id as garbage so GC can reclaim its objects later.
 * PostgreSQL has no REPLACE INTO, so emulate it with exists + insert. */
static int
add_deleted_repo_record (SeafRepoManager *mgr, const char *repo_id)
{
    if (seaf_db_type(seaf->db) == SEAF_DB_TYPE_PGSQL) {
        gboolean exists, err;
        exists = seaf_db_statement_exists (seaf->db,
                                           "SELECT repo_id FROM GarbageRepos "
                                           "WHERE repo_id=?",
                                           &err, 1, "string", repo_id);
        if (err)
            return -1;
        if (!exists) {
            return seaf_db_statement_query(seaf->db,
                                           "INSERT INTO GarbageRepos (repo_id) VALUES (?)",
                                           1, "string", repo_id);
        }
        return 0;
    } else {
        return seaf_db_statement_query (seaf->db,
                                        "REPLACE INTO GarbageRepos (repo_id) VALUES (?)",
                                        1, "string", repo_id);
    }
}

/* Record a deleted repo in RepoTrash (owner, size, head commit, timestamp)
 * so it can be restored before trash expiry. org_id is fixed at -1 here.
 * Returns 0 on success, -1 on failure. */
static int
add_deleted_repo_to_trash (SeafRepoManager *mgr, const char *repo_id,
                           SeafCommit *commit)
{
    char *owner = NULL;
    int ret = -1;

    owner = seaf_repo_manager_get_repo_owner (mgr, repo_id);
    if (!owner) {
        seaf_warning ("Failed to get owner for repo %.8s.\n", repo_id);
        goto out;
    }

    gint64 size = seaf_repo_manager_get_repo_size (mgr, repo_id);
    if (size == -1) {
        seaf_warning ("Failed to get size of repo %.8s.\n", repo_id);
        goto out;
    }

    ret = seaf_db_statement_query (mgr->seaf->db,
                                   "INSERT INTO RepoTrash (repo_id, repo_name, head_id, "
                                   "owner_id, size, org_id, del_time) "
                                   "values (?, ?, ?, ?, ?, -1, ?)",
                                   6, "string", repo_id,
                                   "string", commit->repo_name,
                                   "string", commit->commit_id,
                                   "string", owner,
                                   "int64", size,
                                   "int64", (gint64)time(NULL));

out:
    g_free (owner);
    return ret;
}

/* Delete all DB state of a virtual repo (branches, shares, tokens, sizes)
 * and mark it for GC. Fs/block data belongs to the parent repo and is left
 * alone. Returns 0 on success, -1 if the initial Repo delete fails; later
 * per-table deletes are best-effort. */
static int
remove_virtual_repo_ondisk (SeafRepoManager *mgr,
                            const char *repo_id)
{
    SeafDB *db = mgr->seaf->db;

    /* Remove record in repo table first.
     * Once this is commited, we can gc the other tables later even if
     * we're interrupted.
     */
    if (seaf_db_statement_query (db, "DELETE FROM Repo WHERE repo_id = ?",
                                 1, "string", repo_id) < 0)
        return -1;

    /* remove branch */
    GList *p;
    GList *branch_list = seaf_branch_manager_get_branch_list (seaf->branch_mgr, repo_id);
    for (p = branch_list; p; p = p->next) {
        SeafBranch *b = (SeafBranch *)p->data;
        seaf_repo_manager_branch_repo_unmap (mgr, b);
        seaf_branch_manager_del_branch (seaf->branch_mgr, repo_id, b->name);
    }
    seaf_branch_list_free (branch_list);

    seaf_db_statement_query (db, "DELETE FROM RepoOwner WHERE repo_id = ?",
                             1, "string", repo_id);

    seaf_db_statement_query (db, "DELETE FROM SharedRepo WHERE repo_id = ?",
                             1, "string", repo_id);

    seaf_db_statement_query (db, "DELETE FROM RepoGroup WHERE repo_id = ?",
                             1, "string", repo_id);

    if (!seaf->cloud_mode) {
        seaf_db_statement_query (db, "DELETE FROM InnerPubRepo WHERE repo_id = ?",
                                 1, "string", repo_id);
    }

    seaf_db_statement_query (mgr->seaf->db,
                             "DELETE FROM RepoUserToken WHERE repo_id = ?",
                             1, "string", repo_id);

    seaf_db_statement_query (mgr->seaf->db,
                             "DELETE FROM RepoValidSince WHERE repo_id = ?",
                             1, "string", repo_id);

    seaf_db_statement_query (mgr->seaf->db,
                             "DELETE FROM RepoSize WHERE repo_id = ?",
                             1, "string", repo_id);

    seaf_db_statement_query (mgr->seaf->db,
                             "DELETE FROM RepoInfo WHERE repo_id = ?",
                             1, "string", repo_id);

    /* For GC commit objects for this virtual repo. Fs and blocks are GC
     * from the parent repo.
     */
    add_deleted_repo_record (mgr, repo_id);

    return 0;
}
*/ add_deleted_repo_record (mgr, repo_id); return 0; } static gboolean get_branch (SeafDBRow *row, void *vid) { char *ret = vid; const char *commit_id; commit_id = seaf_db_row_get_column_text (row, 0); memcpy (ret, commit_id, 41); return FALSE; } static SeafCommit* get_head_commit (SeafRepoManager *mgr, const char *repo_id, gboolean *has_err) { char commit_id[41]; char *sql; commit_id[0] = 0; sql = "SELECT commit_id FROM Branch WHERE name=? AND repo_id=?"; if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_branch, commit_id, 2, "string", "master", "string", repo_id) < 0) { *has_err = TRUE; return NULL; } if (commit_id[0] == 0) return NULL; SeafCommit *head_commit = seaf_commit_manager_get_commit (seaf->commit_mgr, repo_id, 1, commit_id); return head_commit; } int seaf_repo_manager_del_repo (SeafRepoManager *mgr, const char *repo_id, GError **error) { gboolean has_err = FALSE; SeafCommit *head_commit = get_head_commit (mgr, repo_id, &has_err); if (has_err) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to get head commit from db"); return -1; } if (!head_commit) { // head commit is missing, repo has beed deleted. 
return 0; } if (add_deleted_repo_to_trash (mgr, repo_id, head_commit) < 0) { // Add repo to trash failed, del repo directly seaf_warning ("Failed to add repo %.8s to trash, delete directly.\n", repo_id); } seaf_commit_unref (head_commit); del_repo: if (seaf_db_statement_query (mgr->seaf->db, "DELETE FROM Repo WHERE repo_id = ?", 1, "string", repo_id) < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to delete repo from db"); return -1; } /* remove branch */ GList *p; GList *branch_list = seaf_branch_manager_get_branch_list (seaf->branch_mgr, repo_id); for (p = branch_list; p; p = p->next) { SeafBranch *b = (SeafBranch *)p->data; seaf_repo_manager_branch_repo_unmap (mgr, b); seaf_branch_manager_del_branch (seaf->branch_mgr, repo_id, b->name); } seaf_branch_list_free (branch_list); seaf_db_statement_query (mgr->seaf->db, "DELETE FROM RepoOwner WHERE repo_id = ?", 1, "string", repo_id); seaf_db_statement_query (mgr->seaf->db, "DELETE FROM SharedRepo WHERE repo_id = ?", 1, "string", repo_id); seaf_db_statement_query (mgr->seaf->db, "DELETE FROM RepoGroup WHERE repo_id = ?", 1, "string", repo_id); if (!seaf->cloud_mode) { seaf_db_statement_query (mgr->seaf->db, "DELETE FROM InnerPubRepo WHERE repo_id = ?", 1, "string", repo_id); } seaf_db_statement_query (mgr->seaf->db, "DELETE t.*, i.* FROM RepoUserToken t, " "RepoTokenPeerInfo i WHERE t.token=i.token AND " "t.repo_id=?", 1, "string", repo_id); seaf_db_statement_query (mgr->seaf->db, "DELETE FROM RepoHistoryLimit WHERE repo_id = ?", 1, "string", repo_id); seaf_db_statement_query (mgr->seaf->db, "DELETE FROM RepoValidSince WHERE repo_id = ?", 1, "string", repo_id); seaf_db_statement_query (mgr->seaf->db, "DELETE FROM RepoSize WHERE repo_id = ?", 1, "string", repo_id); /* Remove virtual repos when origin repo is deleted. 
*/ GList *vrepos, *ptr; vrepos = seaf_repo_manager_get_virtual_repo_ids_by_origin (mgr, repo_id); for (ptr = vrepos; ptr != NULL; ptr = ptr->next) remove_virtual_repo_ondisk (mgr, (char *)ptr->data); string_list_free (vrepos); seaf_db_statement_query (mgr->seaf->db, "DELETE FROM RepoInfo " "WHERE repo_id=?", 1, "string", repo_id); seaf_db_statement_query (mgr->seaf->db, "DELETE FROM VirtualRepo " "WHERE repo_id=? OR origin_repo=?", 2, "string", repo_id, "string", repo_id); if (!head_commit) add_deleted_repo_record(mgr, repo_id); return 0; } int seaf_repo_manager_del_virtual_repo (SeafRepoManager *mgr, const char *repo_id) { int ret = remove_virtual_repo_ondisk (mgr, repo_id); if (ret < 0) return ret; return seaf_db_statement_query (mgr->seaf->db, "DELETE FROM VirtualRepo WHERE repo_id = ?", 1, "string", repo_id); } static gboolean repo_exists_in_db (SeafDB *db, const char *id, gboolean *db_err) { return seaf_db_statement_exists (db, "SELECT repo_id FROM Repo WHERE repo_id = ?", db_err, 1, "string", id); } gboolean create_repo_fill_size (SeafDBRow *row, void *data) { SeafRepo **repo = data; SeafBranch *head; const char *repo_id = seaf_db_row_get_column_text (row, 0); gint64 size = seaf_db_row_get_column_int64 (row, 1); const char *commit_id = seaf_db_row_get_column_text (row, 2); const char *vrepo_id = seaf_db_row_get_column_text (row, 3); gint64 file_count = seaf_db_row_get_column_int64 (row, 7); int status = seaf_db_row_get_column_int(row, 8); const char *type = seaf_db_row_get_column_text (row, 9); *repo = seaf_repo_new (repo_id, NULL, NULL); if (!*repo) return FALSE; if (!commit_id) { (*repo)->is_corrupted = TRUE; return FALSE; } (*repo)->size = size; (*repo)->file_count = file_count; head = seaf_branch_new ("master", repo_id, commit_id); (*repo)->head = head; (*repo)->status = status; if (vrepo_id) { const char *origin_repo_id = seaf_db_row_get_column_text (row, 4); const char *origin_path = seaf_db_row_get_column_text (row, 5); const char *base_commit = 
seaf_db_row_get_column_text (row, 6); SeafVirtRepo *vinfo = g_new0 (SeafVirtRepo, 1); memcpy (vinfo->repo_id, vrepo_id, 36); memcpy (vinfo->origin_repo_id, origin_repo_id, 36); vinfo->path = g_strdup(origin_path); memcpy (vinfo->base_commit, base_commit, 40); (*repo)->virtual_info = vinfo; memcpy ((*repo)->store_id, origin_repo_id, 36); } else { memcpy ((*repo)->store_id, repo_id, 36); } if (type) { (*repo)->type = g_strdup(type); } return TRUE; } static SeafRepo* get_repo_from_db (SeafRepoManager *mgr, const char *id, gboolean *db_err) { SeafRepo *repo = NULL; const char *sql; if (seaf_db_type(mgr->seaf->db) != SEAF_DB_TYPE_PGSQL) sql = "SELECT r.repo_id, s.size, b.commit_id, " "v.repo_id, v.origin_repo, v.path, v.base_commit, fc.file_count, i.status, i.type FROM " "Repo r LEFT JOIN Branch b ON r.repo_id = b.repo_id " "LEFT JOIN RepoSize s ON r.repo_id = s.repo_id " "LEFT JOIN VirtualRepo v ON r.repo_id = v.repo_id " "LEFT JOIN RepoFileCount fc ON r.repo_id = fc.repo_id " "LEFT JOIN RepoInfo i on r.repo_id = i.repo_id " "WHERE r.repo_id = ? AND b.name = 'master'"; else sql = "SELECT r.repo_id, s.\"size\", b.commit_id, " "v.repo_id, v.origin_repo, v.path, v.base_commit, fc.file_count, i.status FROM " "Repo r LEFT JOIN Branch b ON r.repo_id = b.repo_id " "LEFT JOIN RepoSize s ON r.repo_id = s.repo_id " "LEFT JOIN VirtualRepo v ON r.repo_id = v.repo_id " "LEFT JOIN RepoFileCount fc ON r.repo_id = fc.repo_id " "LEFT JOIN RepoInfo i on r.repo_id = i.repo_id " "WHERE r.repo_id = ? 
AND b.name = 'master'"; int ret = seaf_db_statement_foreach_row (mgr->seaf->db, sql, create_repo_fill_size, &repo, 1, "string", id); if (ret < 0) *db_err = TRUE; return repo; } SeafRepo* seaf_repo_manager_get_repo (SeafRepoManager *manager, const gchar *id) { int len = strlen(id); SeafRepo *repo = NULL; gboolean has_err = FALSE; if (len >= 37) return NULL; repo = get_repo_from_db (manager, id, &has_err); if (repo) { if (repo->is_corrupted) { seaf_repo_unref (repo); return NULL; } load_repo (manager, repo); if (repo->is_corrupted) { seaf_repo_unref (repo); return NULL; } } return repo; } SeafRepo* seaf_repo_manager_get_repo_ex (SeafRepoManager *manager, const gchar *id) { int len = strlen(id); gboolean has_err = FALSE; SeafRepo *ret = NULL; if (len >= 37) return NULL; ret = get_repo_from_db (manager, id, &has_err); if (has_err) { ret = seaf_repo_new(id, NULL, NULL); ret->is_corrupted = TRUE; return ret; } if (ret) { if (ret->is_corrupted) { return ret; } load_repo (manager, ret); } return ret; } gboolean seaf_repo_manager_repo_exists (SeafRepoManager *manager, const gchar *id) { gboolean db_err = FALSE; return repo_exists_in_db (manager->seaf->db, id, &db_err); } static int save_branch_repo_map (SeafRepoManager *manager, SeafBranch *branch) { if (seaf_db_type(seaf->db) == SEAF_DB_TYPE_PGSQL) { gboolean exists, err; int rc; exists = seaf_db_statement_exists (seaf->db, "SELECT repo_id FROM RepoHead WHERE repo_id=?", &err, 1, "string", branch->repo_id); if (err) return -1; if (exists) rc = seaf_db_statement_query (seaf->db, "UPDATE RepoHead SET branch_name=? 
" "WHERE repo_id=?", 2, "string", branch->name, "string", branch->repo_id); else rc = seaf_db_statement_query (seaf->db, "INSERT INTO RepoHead (repo_id, branch_name) VALUES (?, ?)", 2, "string", branch->repo_id, "string", branch->name); return rc; } else { return seaf_db_statement_query (seaf->db, "REPLACE INTO RepoHead (repo_id, branch_name) VALUES (?, ?)", 2, "string", branch->repo_id, "string", branch->name); } return -1; } int seaf_repo_manager_branch_repo_unmap (SeafRepoManager *manager, SeafBranch *branch) { return seaf_db_statement_query (seaf->db, "DELETE FROM RepoHead WHERE branch_name = ?" " AND repo_id = ?", 2, "string", branch->name, "string", branch->repo_id); } int set_repo_commit_to_db (const char *repo_id, const char *repo_name, gint64 update_time, int version, gboolean is_encrypted, const char *last_modifier) { char *sql; gboolean exists = FALSE, db_err = FALSE; sql = "SELECT 1 FROM RepoInfo WHERE repo_id=?"; exists = seaf_db_statement_exists (seaf->db, sql, &db_err, 1, "string", repo_id); if (db_err) return -1; if (update_time == 0) update_time = (gint64)time(NULL); if (exists) { sql = "UPDATE RepoInfo SET name=?, update_time=?, version=?, is_encrypted=?, " "last_modifier=? WHERE repo_id=?"; if (seaf_db_statement_query (seaf->db, sql, 6, "string", repo_name, "int64", update_time, "int", version, "int", (is_encrypted ? 1:0), "string", last_modifier, "string", repo_id) < 0) { seaf_warning ("Failed to update repo info for repo %s.\n", repo_id); return -1; } } else { sql = "INSERT INTO RepoInfo (repo_id, name, update_time, version, is_encrypted, last_modifier) " "VALUES (?, ?, ?, ?, ?, ?)"; if (seaf_db_statement_query (seaf->db, sql, 6, "string", repo_id, "string", repo_name, "int64", update_time, "int", version, "int", (is_encrypted ? 
/* Fill repo's metadata from its head commit; marks the repo corrupted when
 * the commit object is missing. */
static void
load_repo_commit (SeafRepoManager *manager,
                  SeafRepo *repo)
{
    SeafCommit *commit;

    commit = seaf_commit_manager_get_commit_compatible (manager->seaf->commit_mgr,
                                                        repo->id,
                                                        repo->head->commit_id);
    if (!commit) {
        seaf_warning ("Commit %s:%s is missing\n", repo->id, repo->head->commit_id);
        repo->is_corrupted = TRUE;
        return;
    }

    seaf_repo_from_commit (repo, commit);

    seaf_commit_unref (commit);
}

/* Attach the manager and load full metadata from the head commit. */
static void
load_repo (SeafRepoManager *manager, SeafRepo *repo)
{
    repo->manager = manager;

    load_repo_commit (manager, repo);
}

/* Lightweight variant of load_repo: copies only the fields needed for
 * listings (name, encrypted, mtime, version, last modifier). */
static void
load_mini_repo (SeafRepoManager *manager, SeafRepo *repo)
{
    repo->manager = manager;
    SeafCommit *commit;

    commit = seaf_commit_manager_get_commit_compatible (manager->seaf->commit_mgr,
                                                        repo->id,
                                                        repo->head->commit_id);
    if (!commit) {
        seaf_warning ("Commit %s:%s is missing\n", repo->id, repo->head->commit_id);
        repo->is_corrupted = TRUE;
        return;
    }

    repo->name = g_strdup (commit->repo_name);
    repo->encrypted = commit->encrypted;
    repo->last_modify = commit->ctime;
    repo->version = commit->version;
    repo->last_modifier = g_strdup (commit->creator_name);

    seaf_commit_unref (commit);
}

/* Create all repo-manager tables on MySQL (InnoDB). Each statement is
 * idempotent via IF NOT EXISTS. Returns 0 on success, -1 on first failure. */
static int
create_tables_mysql (SeafRepoManager *mgr)
{
    SeafDB *db = mgr->seaf->db;
    char *sql;

    sql = "CREATE TABLE IF NOT EXISTS Repo (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
        "repo_id CHAR(37), UNIQUE INDEX (repo_id))"
        "ENGINE=INNODB";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS RepoOwner ("
        "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
        "repo_id CHAR(37), "
        "owner_id VARCHAR(255),"
        "UNIQUE INDEX (repo_id), INDEX (owner_id))"
        "ENGINE=INNODB";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS RepoGroup (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,"
        "repo_id CHAR(37), "
        "group_id INTEGER, user_name VARCHAR(255), permission CHAR(15), "
        "UNIQUE INDEX (group_id, repo_id), "
        "INDEX (repo_id), INDEX (user_name))"
        "ENGINE=INNODB";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS InnerPubRepo ("
        "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
        "repo_id CHAR(37),"
        "permission CHAR(15), UNIQUE INDEX (repo_id))"
        "ENGINE=INNODB";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS RepoUserToken ("
        "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
        "repo_id CHAR(37), "
        "email VARCHAR(255), "
        "token CHAR(41), "
        "UNIQUE INDEX(repo_id, token), INDEX(token), INDEX (email))"
        "ENGINE=INNODB";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS RepoTokenPeerInfo ("
        "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
        "token CHAR(41), "
        "peer_id CHAR(41), "
        "peer_ip VARCHAR(50), "
        "peer_name VARCHAR(255), "
        "sync_time BIGINT, "
        "client_ver VARCHAR(20), UNIQUE INDEX(token), INDEX(peer_id))"
        "ENGINE=INNODB";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS RepoHead ("
        "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
        "repo_id CHAR(37), branch_name VARCHAR(10), UNIQUE INDEX(repo_id))"
        "ENGINE=INNODB";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS RepoSize ("
        "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
        "repo_id CHAR(37),"
        "size BIGINT UNSIGNED,"
        "head_id CHAR(41), UNIQUE INDEX (repo_id))"
        "ENGINE=INNODB";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS RepoHistoryLimit ("
        "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
        "repo_id CHAR(37), days INTEGER, UNIQUE INDEX(repo_id))"
        "ENGINE=INNODB";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS RepoValidSince ("
        "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
        "repo_id CHAR(37), timestamp BIGINT, UNIQUE INDEX(repo_id))"
        "ENGINE=INNODB";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS WebAP (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
        "repo_id CHAR(37), "
        "access_property CHAR(10), UNIQUE INDEX(repo_id))"
        "ENGINE=INNODB";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS VirtualRepo (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
        "repo_id CHAR(36),"
        "origin_repo CHAR(36), path TEXT, base_commit CHAR(40), UNIQUE INDEX(repo_id), INDEX(origin_repo))"
        "ENGINE=INNODB";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS GarbageRepos (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
        "repo_id CHAR(36), UNIQUE INDEX(repo_id))";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    /* Tables for online GC */
    sql = "CREATE TABLE IF NOT EXISTS GCID (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
        "repo_id CHAR(36), gc_id CHAR(36), UNIQUE INDEX(repo_id)) ENGINE=INNODB";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS LastGCID (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
        "repo_id CHAR(36), client_id VARCHAR(128), gc_id CHAR(36), UNIQUE INDEX(repo_id, client_id)) ENGINE=INNODB";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS RepoTrash (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
        "repo_id CHAR(36),"
        "repo_name VARCHAR(255), head_id CHAR(40), owner_id VARCHAR(255),"
        "size BIGINT(20), org_id INTEGER, del_time BIGINT, "
        "UNIQUE INDEX(repo_id), INDEX(owner_id), INDEX(org_id))ENGINE=INNODB";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS RepoFileCount ("
        "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
        "repo_id CHAR(36),"
        "file_count BIGINT UNSIGNED, UNIQUE INDEX(repo_id))ENGINE=INNODB";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS RepoInfo (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
        "repo_id CHAR(36), "
        "name VARCHAR(255) NOT NULL, update_time BIGINT, version INTEGER, "
        "is_encrypted INTEGER, last_modifier VARCHAR(255), status INTEGER DEFAULT 0, type VARCHAR(10), "
        "UNIQUE INDEX(repo_id), INDEX(type)) ENGINE=INNODB";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS WebUploadTempFiles ( "
        "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(40) NOT NULL, "
        "file_path TEXT NOT NULL, tmp_file_path TEXT NOT NULL, INDEX(repo_id)) ENGINE=INNODB";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    return 0;
}

/* SQLite equivalent of create_tables_mysql; indexes are created with
 * separate CREATE INDEX statements. Returns 0 on success, -1 on failure. */
static int
create_tables_sqlite (SeafRepoManager *mgr)
{
    SeafDB *db = mgr->seaf->db;
    char *sql;

    sql = "CREATE TABLE IF NOT EXISTS Repo (repo_id CHAR(37) PRIMARY KEY)";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    /* Owner */
    sql = "CREATE TABLE IF NOT EXISTS RepoOwner ("
        "repo_id CHAR(37) PRIMARY KEY, "
        "owner_id TEXT)";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE INDEX IF NOT EXISTS OwnerIndex ON RepoOwner (owner_id)";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    /* Group repo */
    sql = "CREATE TABLE IF NOT EXISTS RepoGroup (repo_id CHAR(37), "
        "group_id INTEGER, user_name TEXT, permission CHAR(15))";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE UNIQUE INDEX IF NOT EXISTS groupid_repoid_indx on "
        "RepoGroup (group_id, repo_id)";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE INDEX IF NOT EXISTS repogroup_repoid_index on "
        "RepoGroup (repo_id)";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE INDEX IF NOT EXISTS repogroup_username_indx on "
        "RepoGroup (user_name)";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    /* Public repo */
    sql = "CREATE TABLE IF NOT EXISTS InnerPubRepo ("
        "repo_id CHAR(37) PRIMARY KEY,"
        "permission CHAR(15))";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS RepoUserToken ("
        "repo_id CHAR(37), "
        "email VARCHAR(255), "
        "token CHAR(41))";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE UNIQUE INDEX IF NOT EXISTS repo_token_indx on "
        "RepoUserToken (repo_id, token)";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE INDEX IF NOT EXISTS repo_token_email_indx on "
        "RepoUserToken (email)";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS RepoTokenPeerInfo ("
        "token CHAR(41) PRIMARY KEY, "
        "peer_id CHAR(41), "
        "peer_ip VARCHAR(50), "
        "peer_name VARCHAR(255), "
        "sync_time BIGINT, "
        "client_ver VARCHAR(20))";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS RepoHead ("
        "repo_id CHAR(37) PRIMARY KEY, branch_name VARCHAR(10))";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS RepoSize ("
        "repo_id CHAR(37) PRIMARY KEY,"
        "size BIGINT UNSIGNED,"
        "head_id CHAR(41))";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS RepoHistoryLimit ("
        "repo_id CHAR(37) PRIMARY KEY, days INTEGER)";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS RepoValidSince ("
        "repo_id CHAR(37) PRIMARY KEY, timestamp BIGINT)";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS WebAP (repo_id CHAR(37) PRIMARY KEY, "
        "access_property CHAR(10))";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS VirtualRepo (repo_id CHAR(36) PRIMARY KEY,"
        "origin_repo CHAR(36), path TEXT, base_commit CHAR(40))";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE INDEX IF NOT EXISTS virtualrepo_origin_repo_idx "
        "ON VirtualRepo (origin_repo)";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS GarbageRepos (repo_id CHAR(36) PRIMARY KEY)";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS RepoTrash (repo_id CHAR(36) PRIMARY KEY,"
        "repo_name VARCHAR(255), head_id CHAR(40), owner_id VARCHAR(255), size BIGINT UNSIGNED,"
        "org_id INTEGER, del_time BIGINT)";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE INDEX IF NOT EXISTS repotrash_owner_id_idx ON RepoTrash(owner_id)";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE INDEX IF NOT EXISTS repotrash_org_id_idx ON RepoTrash(org_id)";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS RepoFileCount ("
        "repo_id CHAR(36) PRIMARY KEY,"
        "file_count BIGINT UNSIGNED)";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS RepoInfo (repo_id CHAR(36) PRIMARY KEY, "
        "name VARCHAR(255) NOT NULL, update_time INTEGER, version INTEGER, "
        "is_encrypted INTEGER, last_modifier VARCHAR(255), status INTEGER DEFAULT 0)";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE TABLE IF NOT EXISTS WebUploadTempFiles (repo_id CHAR(40) NOT NULL, "
        "file_path TEXT NOT NULL, tmp_file_path TEXT NOT NULL)";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    sql = "CREATE INDEX IF NOT EXISTS webuploadtempfiles_repo_id_idx ON WebUploadTempFiles(repo_id)";
    if (seaf_db_query (db, sql) < 0)
        return -1;

    return 0;
}
RepoTokenPeerInfo (" "token CHAR(41) PRIMARY KEY, " "peer_id CHAR(41), " "peer_ip VARCHAR(50), " "peer_name VARCHAR(255), " "sync_time BIGINT, " "client_ver VARCHAR(20))"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS RepoHead (" "repo_id CHAR(37) PRIMARY KEY, branch_name VARCHAR(10))"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS RepoSize (" "repo_id CHAR(37) PRIMARY KEY," "size BIGINT UNSIGNED," "head_id CHAR(41))"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS RepoHistoryLimit (" "repo_id CHAR(37) PRIMARY KEY, days INTEGER)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS RepoValidSince (" "repo_id CHAR(37) PRIMARY KEY, timestamp BIGINT)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS WebAP (repo_id CHAR(37) PRIMARY KEY, " "access_property CHAR(10))"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS VirtualRepo (repo_id CHAR(36) PRIMARY KEY," "origin_repo CHAR(36), path TEXT, base_commit CHAR(40))"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE INDEX IF NOT EXISTS virtualrepo_origin_repo_idx " "ON VirtualRepo (origin_repo)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS GarbageRepos (repo_id CHAR(36) PRIMARY KEY)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS RepoTrash (repo_id CHAR(36) PRIMARY KEY," "repo_name VARCHAR(255), head_id CHAR(40), owner_id VARCHAR(255), size BIGINT UNSIGNED," "org_id INTEGER, del_time BIGINT)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE INDEX IF NOT EXISTS repotrash_owner_id_idx ON RepoTrash(owner_id)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE INDEX IF NOT EXISTS repotrash_org_id_idx ON RepoTrash(org_id)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS RepoFileCount (" "repo_id CHAR(36) PRIMARY KEY," 
"file_count BIGINT UNSIGNED)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS RepoInfo (repo_id CHAR(36) PRIMARY KEY, " "name VARCHAR(255) NOT NULL, update_time INTEGER, version INTEGER, " "is_encrypted INTEGER, last_modifier VARCHAR(255), status INTEGER DEFAULT 0)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE TABLE IF NOT EXISTS WebUploadTempFiles (repo_id CHAR(40) NOT NULL, " "file_path TEXT NOT NULL, tmp_file_path TEXT NOT NULL)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE INDEX IF NOT EXISTS webuploadtempfiles_repo_id_idx ON WebUploadTempFiles(repo_id)"; if (seaf_db_query (db, sql) < 0) return -1; return 0; } /* static int */ /* create_tables_pgsql (SeafRepoManager *mgr) */ /* { */ /* SeafDB *db = mgr->seaf->db; */ /* char *sql; */ /* sql = "CREATE TABLE IF NOT EXISTS Repo (repo_id CHAR(36) PRIMARY KEY)"; */ /* if (seaf_db_query (db, sql) < 0) */ /* return -1; */ /* sql = "CREATE TABLE IF NOT EXISTS RepoOwner (" */ /* "repo_id CHAR(36) PRIMARY KEY, " */ /* "owner_id VARCHAR(255))"; */ /* if (seaf_db_query (db, sql) < 0) */ /* return -1; */ /* if (!pgsql_index_exists (db, "repoowner_owner_idx")) { */ /* sql = "CREATE INDEX repoowner_owner_idx ON RepoOwner (owner_id)"; */ /* if (seaf_db_query (db, sql) < 0) */ /* return -1; */ /* } */ /* sql = "CREATE TABLE IF NOT EXISTS RepoGroup (repo_id CHAR(36), " */ /* "group_id INTEGER, user_name VARCHAR(255), permission VARCHAR(15), " */ /* "UNIQUE (group_id, repo_id))"; */ /* if (seaf_db_query (db, sql) < 0) */ /* return -1; */ /* if (!pgsql_index_exists (db, "repogroup_repoid_idx")) { */ /* sql = "CREATE INDEX repogroup_repoid_idx ON RepoGroup (repo_id)"; */ /* if (seaf_db_query (db, sql) < 0) */ /* return -1; */ /* } */ /* if (!pgsql_index_exists (db, "repogroup_username_idx")) { */ /* sql = "CREATE INDEX repogroup_username_idx ON RepoGroup (user_name)"; */ /* if (seaf_db_query (db, sql) < 0) */ /* return -1; */ /* } */ /* sql = "CREATE TABLE IF NOT EXISTS 
InnerPubRepo (" */ /* "repo_id CHAR(36) PRIMARY KEY," */ /* "permission VARCHAR(15))"; */ /* if (seaf_db_query (db, sql) < 0) */ /* return -1; */ /* sql = "CREATE TABLE IF NOT EXISTS RepoUserToken (" */ /* "repo_id CHAR(36), " */ /* "email VARCHAR(255), " */ /* "token CHAR(40), " */ /* "UNIQUE (repo_id, token))"; */ /* if (seaf_db_query (db, sql) < 0) */ /* return -1; */ /* if (!pgsql_index_exists (db, "repousertoken_email_idx")) { */ /* sql = "CREATE INDEX repousertoken_email_idx ON RepoUserToken (email)"; */ /* if (seaf_db_query (db, sql) < 0) */ /* return -1; */ /* } */ /* sql = "CREATE TABLE IF NOT EXISTS RepoTokenPeerInfo (" */ /* "token CHAR(40) PRIMARY KEY, " */ /* "peer_id CHAR(40), " */ /* "peer_ip VARCHAR(40), " */ /* "peer_name VARCHAR(255), " */ /* "sync_time BIGINT, " */ /* "client_ver VARCHAR(20))"; */ /* if (seaf_db_query (db, sql) < 0) */ /* return -1; */ /* sql = "CREATE TABLE IF NOT EXISTS RepoHead (" */ /* "repo_id CHAR(36) PRIMARY KEY, branch_name VARCHAR(10))"; */ /* if (seaf_db_query (db, sql) < 0) */ /* return -1; */ /* sql = "CREATE TABLE IF NOT EXISTS RepoSize (" */ /* "repo_id CHAR(36) PRIMARY KEY," */ /* "size BIGINT," */ /* "head_id CHAR(40))"; */ /* if (seaf_db_query (db, sql) < 0) */ /* return -1; */ /* sql = "CREATE TABLE IF NOT EXISTS RepoHistoryLimit (" */ /* "repo_id CHAR(36) PRIMARY KEY, days INTEGER)"; */ /* if (seaf_db_query (db, sql) < 0) */ /* return -1; */ /* sql = "CREATE TABLE IF NOT EXISTS RepoValidSince (" */ /* "repo_id CHAR(36) PRIMARY KEY, timestamp BIGINT)"; */ /* if (seaf_db_query (db, sql) < 0) */ /* return -1; */ /* sql = "CREATE TABLE IF NOT EXISTS WebAP (repo_id CHAR(36) PRIMARY KEY, " */ /* "access_property VARCHAR(10))"; */ /* if (seaf_db_query (db, sql) < 0) */ /* return -1; */ /* sql = "CREATE TABLE IF NOT EXISTS VirtualRepo (repo_id CHAR(36) PRIMARY KEY," */ /* "origin_repo CHAR(36), path TEXT, base_commit CHAR(40))"; */ /* if (seaf_db_query (db, sql) < 0) */ /* return -1; */ /* if (!pgsql_index_exists (db, 
"virtualrepo_origin_repo_idx")) { */
/*         sql = "CREATE INDEX virtualrepo_origin_repo_idx ON VirtualRepo (origin_repo)"; */
/*         if (seaf_db_query (db, sql) < 0) */
/*             return -1; */
/*     } */
/*     sql = "CREATE TABLE IF NOT EXISTS GarbageRepos (repo_id CHAR(36) PRIMARY KEY)"; */
/*     if (seaf_db_query (db, sql) < 0) */
/*         return -1; */
/*     sql = "CREATE TABLE IF NOT EXISTS RepoTrash (repo_id CHAR(36) PRIMARY KEY," */
/*           "repo_name VARCHAR(255), head_id CHAR(40), owner_id VARCHAR(255), size bigint," */
/*           "org_id INTEGER, del_time BIGINT)"; */
/*     if (seaf_db_query (db, sql) < 0) */
/*         return -1; */
/*     if (!pgsql_index_exists (db, "repotrash_owner_id")) { */
/*         sql = "CREATE INDEX repotrash_owner_id on RepoTrash(owner_id)"; */
/*         if (seaf_db_query (db, sql) < 0) */
/*             return -1; */
/*     } */
/*     if (!pgsql_index_exists (db, "repotrash_org_id")) { */
/*         sql = "CREATE INDEX repotrash_org_id on RepoTrash(org_id)"; */
/*         if (seaf_db_query (db, sql) < 0) */
/*             return -1; */
/*     } */
/*     sql = "CREATE TABLE IF NOT EXISTS RepoFileCount (" */
/*           "repo_id CHAR(36) PRIMARY KEY," */
/*           "file_count BIGINT)"; */
/*     if (seaf_db_query (db, sql) < 0) */
/*         return -1; */
/*     sql = "CREATE TABLE IF NOT EXISTS WebUploadTempFiles (repo_id CHAR(40) NOT NULL, " */
/*           "file_path TEXT NOT NULL, tmp_file_path TEXT NOT NULL)"; */
/*     if (seaf_db_query (db, sql) < 0) */
/*         return -1; */
/*     sql = "CREATE TABLE IF NOT EXISTS RepoInfo (repo_id CHAR(36) PRIMARY KEY, " */
/*           "name VARCHAR(255) NOT NULL, update_time BIGINT, version INTEGER, " */
/*           "is_encrypted INTEGER, last_modifier VARCHAR(255), status INTEGER DEFAULT 0)"; */
/*     if (seaf_db_query (db, sql) < 0) */
/*         return -1; */
/*     return 0; */
/* } */

/* Create the repo-related tables for the configured database backend.
 * Skipped when table auto-creation is disabled (the PGSQL variant above
 * is currently commented out). Returns 0 on success, -1 on failure. */
static int
create_db_tables_if_not_exist (SeafRepoManager *mgr)
{
    if (!mgr->seaf->create_tables && seaf_db_type (mgr->seaf->db) != SEAF_DB_TYPE_PGSQL)
        return 0;

    SeafDB *db = mgr->seaf->db;
    int db_type = seaf_db_type (db);
    if (db_type == SEAF_DB_TYPE_MYSQL)
        return create_tables_mysql (mgr);
    else if (db_type == SEAF_DB_TYPE_SQLITE)
        return create_tables_sqlite (mgr);
    /* else if (db_type == SEAF_DB_TYPE_PGSQL) */
    /*     return create_tables_pgsql (mgr); */

    g_return_val_if_reached (-1);
}

/*
 * Repo properties functions.
 */

/* Generate a random 40-char hex token: SHA1 of a freshly generated UUID.
 * Caller owns the returned string. */
static inline char *
generate_repo_token ()
{
    char *uuid = gen_uuid ();
    unsigned char sha1[20];
    char token[41];
    SHA_CTX s;

    SHA1_Init (&s);
    SHA1_Update (&s, uuid, strlen(uuid));
    SHA1_Final (sha1, &s);
    rawdata_to_hex (sha1, token, 20);
    g_free (uuid);

    return g_strdup (token);
}

/* Persist a (repo_id, email, token) triple into RepoUserToken.
 * Sets @error and returns -1 on DB failure, 0 on success. */
static int
add_repo_token (SeafRepoManager *mgr,
                const char *repo_id,
                const char *email,
                const char *token,
                GError **error)
{
    int rc = seaf_db_statement_query (mgr->seaf->db,
                                      "INSERT INTO RepoUserToken (repo_id, email, token) VALUES (?, ?, ?)",
                                      3, "string", repo_id,
                                      "string", email,
                                      "string", token);
    if (rc < 0) {
        seaf_warning ("failed to add repo token. repo = %s, email = %s\n",
                      repo_id, email);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "DB error");
        return -1;
    }

    return 0;
}

/* Create, store and return a new sync token for (@repo_id, @email).
 * Caller owns the returned string; returns NULL on DB error. */
char *
seaf_repo_manager_generate_repo_token (SeafRepoManager *mgr,
                                       const char *repo_id,
                                       const char *email,
                                       GError **error)
{
    char *token = generate_repo_token ();
    if (add_repo_token (mgr, repo_id, email, token, error) < 0) {
        g_free (token);
        return NULL;
    }

    return token;
}

/* Record client (peer) info for a sync token. Returns 0 on success, -1 on
 * DB error. */
int
seaf_repo_manager_add_token_peer_info (SeafRepoManager *mgr,
                                       const char *token,
                                       const char *peer_id,
                                       const char *peer_ip,
                                       const char *peer_name,
                                       gint64 sync_time,
                                       const char *client_ver)
{
    int ret = 0;

    if (seaf_db_statement_query (mgr->seaf->db,
                                 "INSERT INTO RepoTokenPeerInfo (token, peer_id, peer_ip, peer_name, sync_time, client_ver)"
                                 "VALUES (?, ?, ?, ?, ?, ?)",
                                 6, "string", token,
                                 "string", peer_id,
                                 "string", peer_ip,
                                 "string", peer_name,
                                 "int64", sync_time,
                                 "string", client_ver) < 0)
        ret = -1;

    return ret;
}

/* Update the mutable client info (ip, last sync time, client version) of a
 * token. Returns 0 on success, -1 on DB error. */
int
seaf_repo_manager_update_token_peer_info (SeafRepoManager *mgr,
                                          const char *token,
                                          const char *peer_ip,
                                          gint64 sync_time,
                                          const char *client_ver)
{
    int ret = 0;

    if (seaf_db_statement_query (mgr->seaf->db,
                                 "UPDATE RepoTokenPeerInfo SET "
                                 "peer_ip=?, sync_time=?, client_ver=? WHERE token=?",
                                 4, "string", peer_ip,
                                 "int64", sync_time,
                                 "string", client_ver,
                                 "string", token) < 0)
        ret = -1;

    return ret;
}

/* Whether peer info has been recorded for @token. */
gboolean
seaf_repo_manager_token_peer_info_exists (SeafRepoManager *mgr,
                                          const char *token)
{
    gboolean db_error = FALSE;

    return seaf_db_statement_exists (mgr->seaf->db,
                                     "SELECT token FROM RepoTokenPeerInfo WHERE token=?",
                                     &db_error, 1, "string", token);
}

/* Delete sync token @token of @repo_id on behalf of @user. Only the token's
 * owner may delete it. Also removes the associated peer info and invalidates
 * the token in the HTTP server's cache. Returns 0 on success, -1 on error. */
int
seaf_repo_manager_delete_token (SeafRepoManager *mgr,
                                const char *repo_id,
                                const char *token,
                                const char *user,
                                GError **error)
{
    char *token_owner;

    token_owner = seaf_repo_manager_get_email_by_token (mgr, repo_id, token);
    if (!token_owner || strcmp (user, token_owner) != 0) {
        seaf_warning ("Requesting user is %s, token owner is %s, "
                      "refuse to delete token %.10s.\n", user, token_owner, token);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Permission denied");
        /* Fix: token_owner was leaked on this path (g_free(NULL) is safe). */
        g_free (token_owner);
        return -1;
    }
    /* Fix: token_owner was leaked on every path. */
    g_free (token_owner);

    /* NOTE(review): multi-table DELETE is MySQL-specific syntax; presumably
     * this path is only reached with a MySQL backend -- confirm. */
    if (seaf_db_statement_query (mgr->seaf->db,
                                 "DELETE t.*, i.* FROM RepoUserToken t, "
                                 "RepoTokenPeerInfo i WHERE t.token=i.token AND "
                                 "t.token=?",
                                 1, "string", token) < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "DB error");
        return -1;
    }

    GList *tokens = NULL;
    tokens = g_list_append (tokens, g_strdup(token));
#ifdef HAVE_EVHTP
    seaf_http_server_invalidate_tokens (seaf->http_server, tokens);
#endif
    g_list_free_full (tokens, (GDestroyNotify)g_free);

    return 0;
}

/* Row callback: build a SeafileRepoTokenInfo from one joined
 * RepoUserToken/RepoTokenPeerInfo/RepoOwner row. */
static gboolean
collect_repo_token (SeafDBRow *row, void *data)
{
    GList **ret_list = data;
    const char *repo_id, *repo_owner, *email, *token;
    const char *peer_id, *peer_ip, *peer_name;
    gint64 sync_time;
    const char *client_ver;

    repo_id = seaf_db_row_get_column_text (row, 0);
    repo_owner = seaf_db_row_get_column_text (row, 1);
    email = seaf_db_row_get_column_text (row, 2);
    token = seaf_db_row_get_column_text (row, 3);
    peer_id = seaf_db_row_get_column_text (row, 4);
    peer_ip = seaf_db_row_get_column_text (row, 5);
    peer_name = seaf_db_row_get_column_text (row, 6);
    sync_time =
seaf_db_row_get_column_int64 (row, 7);
    client_ver = seaf_db_row_get_column_text (row, 8);

    /* Owner and email are normalised to lower case for the returned object. */
    char *owner_l = g_ascii_strdown (repo_owner, -1);
    char *email_l = g_ascii_strdown (email, -1);

    SeafileRepoTokenInfo *repo_token_info;
    repo_token_info = g_object_new (SEAFILE_TYPE_REPO_TOKEN_INFO,
                                    "repo_id", repo_id,
                                    "repo_owner", owner_l,
                                    "email", email_l,
                                    "token", token,
                                    "peer_id", peer_id,
                                    "peer_ip", peer_ip,
                                    "peer_name", peer_name,
                                    "sync_time", sync_time,
                                    "client_ver", client_ver,
                                    NULL);
    *ret_list = g_list_prepend (*ret_list, repo_token_info);

    g_free (owner_l);
    g_free (email_l);

    return TRUE;
}

/* For each token info in @info_list, look up the repo and fill in its
 * display name ("Unknown" when the repo cannot be loaded). */
static void
fill_in_token_info (GList *info_list)
{
    GList *ptr;
    SeafileRepoTokenInfo *info;
    SeafRepo *repo;
    char *repo_name;

    for (ptr = info_list; ptr; ptr = ptr->next) {
        info = ptr->data;
        repo = seaf_repo_manager_get_repo (seaf->repo_mgr,
                                           seafile_repo_token_info_get_repo_id(info));
        if (repo)
            repo_name = g_strdup(repo->name);
        else
            repo_name = g_strdup("Unknown");
        /* NOTE(review): repo may be NULL here; presumably seaf_repo_unref()
         * tolerates NULL -- confirm. */
        seaf_repo_unref (repo);
        g_object_set (info, "repo_name", repo_name, NULL);
        g_free (repo_name);
    }
}

/* List all sync tokens of @repo_id, with owner/peer details filled in.
 * Returns a list of SeafileRepoTokenInfo; NULL when the repo does not
 * exist or on DB error (with @error set). */
GList *
seaf_repo_manager_list_repo_tokens (SeafRepoManager *mgr,
                                    const char *repo_id,
                                    GError **error)
{
    GList *ret_list = NULL;
    char *sql;
    gboolean db_err = FALSE;

    if (!repo_exists_in_db (mgr->seaf->db, repo_id, &db_err)) {
        if (db_err) {
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "DB error");
        }
        return NULL;
    }

    sql = "SELECT u.repo_id, o.owner_id, u.email, u.token, "
        "p.peer_id, p.peer_ip, p.peer_name, p.sync_time, p.client_ver "
        "FROM RepoUserToken u LEFT JOIN RepoTokenPeerInfo p "
        "ON u.token = p.token, RepoOwner o "
        "WHERE u.repo_id = ? and o.repo_id = ? ";

    int n_row = seaf_db_statement_foreach_row (mgr->seaf->db, sql,
                                               collect_repo_token, &ret_list,
                                               2, "string", repo_id,
                                               "string", repo_id);
    if (n_row < 0) {
        seaf_warning ("DB error when get token info for repo %.10s.\n",
                      repo_id);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "DB error");
    }

    fill_in_token_info (ret_list);

    return g_list_reverse(ret_list);
}

/* List all sync tokens owned by @email across all repos. */
GList *
seaf_repo_manager_list_repo_tokens_by_email (SeafRepoManager *mgr,
                                             const char *email,
                                             GError **error)
{
    GList *ret_list = NULL;
    char *sql;

    sql = "SELECT u.repo_id, o.owner_id, u.email, u.token, "
        "p.peer_id, p.peer_ip, p.peer_name, p.sync_time, p.client_ver "
        "FROM RepoUserToken u LEFT JOIN RepoTokenPeerInfo p "
        "ON u.token = p.token, RepoOwner o "
        "WHERE u.email = ? and u.repo_id = o.repo_id";

    int n_row = seaf_db_statement_foreach_row (mgr->seaf->db, sql,
                                               collect_repo_token, &ret_list,
                                               1, "string", email);
    if (n_row < 0) {
        seaf_warning ("DB error when get token info for email %s.\n",
                      email);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "DB error");
    }

    fill_in_token_info (ret_list);

    return g_list_reverse(ret_list);
}

/* Row callback: collect the token string in column 0 into a GList. */
static gboolean
collect_token_list (SeafDBRow *row, void *data)
{
    GList **p_tokens = data;
    const char *token;

    token = seaf_db_row_get_column_text (row, 0);
    *p_tokens = g_list_prepend (*p_tokens, g_strdup(token));

    return TRUE;
}

/**
 * Delete all repo tokens for a given user on a given client.
 * On success, the deleted tokens are returned through @tokens (caller owns
 * the list); on failure -1 is returned, @error is set and the list is freed.
 */
int
seaf_repo_manager_delete_repo_tokens_by_peer_id (SeafRepoManager *mgr,
                                                 const char *email,
                                                 const char *peer_id,
                                                 GList **tokens,
                                                 GError **error)
{
    int ret = 0;
    const char *template;
    GList *token_list = NULL;
    int rc = 0;
    int db_type = seaf_db_type (mgr->seaf->db);

    template = "SELECT u.token "
        "FROM RepoUserToken u, RepoTokenPeerInfo p "
        "WHERE u.token = p.token "
        "AND u.email = ? AND p.peer_id = ?";
    rc = seaf_db_statement_foreach_row (mgr->seaf->db, template,
                                        collect_token_list, &token_list,
                                        2, "string", email, "string", peer_id);
    if (rc < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, "DB error");
        goto out;
    }

    /* No matching tokens: nothing to delete. */
    if (rc == 0)
        goto out;

    if (db_type == SEAF_DB_TYPE_MYSQL) {
        /* Multi-table DELETE removes the token and its peer info at once
         * (MySQL-specific syntax). */
        rc = seaf_db_statement_query (mgr->seaf->db,
                                      "DELETE u.*, p.* "
                                      "FROM RepoUserToken u, RepoTokenPeerInfo p "
                                      "WHERE u.token=p.token AND "
                                      "u.email = ? AND p.peer_id = ?",
                                      2, "string", email, "string", peer_id);
        if (rc < 0) {
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, "DB error");
            goto out;
        }
    } else if (db_type == SEAF_DB_TYPE_SQLITE) {
        /* SQLite has no multi-table DELETE: delete from the two tables
         * separately, listing the tokens found above.
         * NOTE(review): email/peer_id/tokens are interpolated directly into
         * the SQL; tokens are presumably hex-only, but email comes from the
         * caller -- consider bound parameters. */
        GString *sql = g_string_new ("");
        GList *iter;
        int i = 0;
        char *token;

        g_string_append_printf (sql, "DELETE FROM RepoUserToken WHERE email = '%s' AND token IN (", email);
        for (iter = token_list; iter; iter = iter->next) {
            token = iter->data;
            if (i == 0)
                g_string_append_printf (sql, "'%s'", token);
            else
                g_string_append_printf (sql, ", '%s'", token);
            ++i;
        }
        g_string_append (sql, ")");

        rc = seaf_db_statement_query (mgr->seaf->db, sql->str, 0);
        if (rc < 0) {
            g_string_free (sql, TRUE);
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, "DB error");
            goto out;
        }
        g_string_free (sql, TRUE);

        sql = g_string_new ("");
        g_string_append_printf (sql, "DELETE FROM RepoTokenPeerInfo WHERE peer_id = '%s' AND token IN (", peer_id);
        i = 0;
        for (iter = token_list; iter; iter = iter->next) {
            token = iter->data;
            if (i == 0)
                g_string_append_printf (sql, "'%s'", token);
            else
                g_string_append_printf (sql, ", '%s'", token);
            ++i;
        }
        g_string_append (sql, ")");

        rc = seaf_db_statement_query (mgr->seaf->db, sql->str, 0);
        if (rc < 0) {
            g_string_free (sql, TRUE);
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, "DB error");
            goto out;
        }
        g_string_free (sql, TRUE);
    }

out:
    if (rc < 0) {
        ret = -1;
        g_list_free_full (token_list, (GDestroyNotify)g_free);
    } else {
        *tokens = token_list;
    }

    return ret;
}

/* Delete all sync tokens owned by @email (signature continues on the
 * next source line). */
int seaf_repo_manager_delete_repo_tokens_by_email
(SeafRepoManager *mgr,
                                               const char *email,
                                               GError **error)
{
    int ret = 0;
    const char *template;
    GList *token_list = NULL;
    int rc;

    template = "SELECT u.token "
        "FROM RepoUserToken u, RepoTokenPeerInfo p "
        "WHERE u.token = p.token "
        "AND u.email = ?";
    rc = seaf_db_statement_foreach_row (mgr->seaf->db, template,
                                        collect_token_list, &token_list,
                                        1, "string", email);
    if (rc < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, "DB error");
        goto out;
    }

    /* No tokens for this user: nothing to delete. */
    if (rc == 0)
        goto out;

    /* NOTE(review): multi-table DELETE is MySQL-specific syntax -- confirm
     * this is only reached with a MySQL backend. */
    rc = seaf_db_statement_query (mgr->seaf->db,
                                  "DELETE u.*, p.* "
                                  "FROM RepoUserToken u, RepoTokenPeerInfo p "
                                  "WHERE u.token=p.token AND "
                                  "u.email = ?",
                                  1, "string", email);
    if (rc < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, "DB error");
        goto out;
    }

#ifdef HAVE_EVHTP
    seaf_http_server_invalidate_tokens (seaf->http_server, token_list);
#endif

out:
    g_list_free_full (token_list, (GDestroyNotify)g_free);

    if (rc < 0) {
        ret = -1;
    }

    return ret;
}

/* Row callback: copy out the (lower-cased) email in column 0. */
static gboolean
get_email_by_token_cb (SeafDBRow *row, void *data)
{
    char **email_ptr = data;

    const char *email = (const char *) seaf_db_row_get_column_text (row, 0);
    *email_ptr = g_ascii_strdown (email, -1);
    /* There should be only one result. */
    return FALSE;
}

/* Look up the owner email of sync token @token in repo @repo_id.
 * Caller owns the returned string; NULL if not found. */
char *
seaf_repo_manager_get_email_by_token (SeafRepoManager *manager,
                                      const char *repo_id,
                                      const char *token)
{
    if (!repo_id || !token)
        return NULL;

    char *email = NULL;
    char *sql;

    sql = "SELECT email FROM RepoUserToken "
        "WHERE repo_id = ? AND token = ?";

    seaf_db_statement_foreach_row (seaf->db, sql,
                                   get_email_by_token_cb, &email,
                                   2, "string", repo_id, "string", token);

    return email;
}

/* Row callback: copy out the size in column 0. */
static gboolean
get_repo_size (SeafDBRow *row, void *vsize)
{
    gint64 *psize = vsize;

    *psize = seaf_db_row_get_column_int64 (row, 0);

    return FALSE;
}

/* Return the cached size of @repo_id from RepoSize; 0 when no row exists,
 * -1 on DB error. */
gint64
seaf_repo_manager_get_repo_size (SeafRepoManager *mgr, const char *repo_id)
{
    gint64 size = 0;
    char *sql;

    sql = "SELECT size FROM RepoSize WHERE repo_id=?";

    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,
                                       get_repo_size, &size,
                                       1, "string", repo_id) < 0)
        return -1;

    return size;
}

/* Set the history-keep limit (in days) for @repo_id. Virtual repos share
 * history with their origin, so the call is a no-op for them.
 * Returns 0 on success, negative on DB error. */
int
seaf_repo_manager_set_repo_history_limit (SeafRepoManager *mgr,
                                          const char *repo_id,
                                          int days)
{
    SeafVirtRepo *vinfo;
    SeafDB *db = mgr->seaf->db;

    vinfo = seaf_repo_manager_get_virtual_repo_info (mgr, repo_id);
    if (vinfo) {
        seaf_virtual_repo_info_free (vinfo);
        return 0;
    }

    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) {
        /* PGSQL has no REPLACE INTO: emulate upsert with exists/UPDATE/INSERT. */
        gboolean exists, err;
        int rc;

        exists = seaf_db_statement_exists (db,
                                           "SELECT repo_id FROM RepoHistoryLimit "
                                           "WHERE repo_id=?",
                                           &err, 1, "string", repo_id);
        if (err)
            return -1;

        if (exists)
            rc = seaf_db_statement_query (db,
                                          "UPDATE RepoHistoryLimit SET days=? "
                                          "WHERE repo_id=?",
                                          2, "int", days, "string", repo_id);
        else
            rc = seaf_db_statement_query (db,
                                          "INSERT INTO RepoHistoryLimit (repo_id, days) VALUES "
                                          "(?, ?)",
                                          2, "string", repo_id, "int", days);
        return rc;
    } else {
        if (seaf_db_statement_query (db,
                                     "REPLACE INTO RepoHistoryLimit (repo_id, days) VALUES (?, ?)",
                                     2, "string", repo_id, "int", days) < 0)
            return -1;
    }

    return 0;
}

/* Row callback: copy out the day limit in column 0. */
static gboolean
get_history_limit_cb (SeafDBRow *row, void *data)
{
    int *limit = data;

    *limit = seaf_db_row_get_column_int (row, 0);

    return FALSE;
}

/* Get the effective history-keep limit (days) for @repo_id. Virtual repos
 * use their origin repo's limit; if no per-repo limit is set the global
 * "history/keep_days" config value is used; -1 means keep full history. */
int
seaf_repo_manager_get_repo_history_limit (SeafRepoManager *mgr,
                                          const char *repo_id)
{
    SeafVirtRepo *vinfo;
    const char *r_repo_id = repo_id;
    char *sql;
    int per_repo_days = -1;
    int ret;

    vinfo = seaf_repo_manager_get_virtual_repo_info (mgr, repo_id);
    if (vinfo)
        r_repo_id = vinfo->origin_repo_id;

    sql = "SELECT days FROM RepoHistoryLimit WHERE repo_id=?";

    ret = seaf_db_statement_foreach_row (mgr->seaf->db, sql,
                                         get_history_limit_cb,
                                         &per_repo_days,
                                         1, "string", r_repo_id);
    if (ret == 0) {
        // limit not set, return global one
        per_repo_days = seaf_cfg_manager_get_config_int (mgr->seaf->cfg_mgr,
                                                         "history", "keep_days");
    }

    // db error or limit set as negative, means keep full history, return -1
    if (per_repo_days < 0)
        per_repo_days = -1;

    seaf_virtual_repo_info_free (vinfo);

    return per_repo_days;
}

/* Record the earliest valid timestamp for @repo_id (used by GC/history
 * truncation). Returns 0 on success, -1 on DB error. */
int
seaf_repo_manager_set_repo_valid_since (SeafRepoManager *mgr,
                                        const char *repo_id,
                                        gint64 timestamp)
{
    SeafDB *db = mgr->seaf->db;

    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) {
        /* PGSQL has no REPLACE INTO: emulate upsert with exists/UPDATE/INSERT. */
        gboolean exists, err;
        int rc;

        exists = seaf_db_statement_exists (db,
                                           "SELECT repo_id FROM RepoValidSince WHERE "
                                           "repo_id=?", &err,
                                           1, "string", repo_id);
        if (err)
            return -1;

        if (exists)
            rc = seaf_db_statement_query (db,
                                          "UPDATE RepoValidSince SET timestamp=?"
" WHERE repo_id=?", 2, "int64", timestamp, "string", repo_id); else rc = seaf_db_statement_query (db, "INSERT INTO RepoValidSince (repo_id, timestamp) VALUES " "(?, ?)", 2, "string", repo_id, "int64", timestamp); if (rc < 0) return -1; } else { if (seaf_db_statement_query (db, "REPLACE INTO RepoValidSince (repo_id, timestamp) VALUES (?, ?)", 2, "string", repo_id, "int64", timestamp) < 0) return -1; } return 0; } gint64 seaf_repo_manager_get_repo_valid_since (SeafRepoManager *mgr, const char *repo_id) { char *sql; sql = "SELECT timestamp FROM RepoValidSince WHERE repo_id=?"; /* Also return -1 if doesn't exist. */ return seaf_db_statement_get_int64 (mgr->seaf->db, sql, 1, "string", repo_id); } gint64 seaf_repo_manager_get_repo_truncate_time (SeafRepoManager *mgr, const char *repo_id) { int days; gint64 timestamp; days = seaf_repo_manager_get_repo_history_limit (mgr, repo_id); timestamp = seaf_repo_manager_get_repo_valid_since (mgr, repo_id); gint64 now = (gint64)time(NULL); if (days > 0) return MAX (now - days * 24 * 3600, timestamp); else if (days < 0) return timestamp; else return 0; } /* * Permission related functions. */ /* Owner functions. 
*/ int seaf_repo_manager_set_repo_owner (SeafRepoManager *mgr, const char *repo_id, const char *email) { SeafDB *db = mgr->seaf->db; char sql[256]; char *orig_owner = NULL; int ret = 0; orig_owner = seaf_repo_manager_get_repo_owner (mgr, repo_id); if (g_strcmp0 (orig_owner, email) == 0) goto out; if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) { gboolean err; snprintf(sql, sizeof(sql), "SELECT repo_id FROM RepoOwner WHERE repo_id=?"); if (seaf_db_statement_exists (db, sql, &err, 1, "string", repo_id)) snprintf(sql, sizeof(sql), "UPDATE RepoOwner SET owner_id='%s' WHERE " "repo_id='%s'", email, repo_id); else snprintf(sql, sizeof(sql), "INSERT INTO RepoOwner (repo_id, owner_id) VALUES ('%s', '%s')", repo_id, email); if (err) { ret = -1; goto out; } if (seaf_db_query (db, sql) < 0) { ret = -1; goto out; } } else { if (seaf_db_statement_query (db, "REPLACE INTO RepoOwner (repo_id, owner_id) VALUES (?, ?)", 2, "string", repo_id, "string", email) < 0) { ret = -1; goto out; } } /* If the repo was newly created, no need to remove share and virtual repos. */ if (!orig_owner) goto out; seaf_db_statement_query (mgr->seaf->db, "DELETE FROM SharedRepo WHERE repo_id = ?", 1, "string", repo_id); seaf_db_statement_query (mgr->seaf->db, "DELETE FROM RepoGroup WHERE repo_id = ?", 1, "string", repo_id); if (!seaf->cloud_mode) { seaf_db_statement_query (mgr->seaf->db, "DELETE FROM InnerPubRepo WHERE repo_id = ?", 1, "string", repo_id); } /* Remove virtual repos when repo ownership changes. */ GList *vrepos, *ptr; vrepos = seaf_repo_manager_get_virtual_repo_ids_by_origin (mgr, repo_id); for (ptr = vrepos; ptr != NULL; ptr = ptr->next) remove_virtual_repo_ondisk (mgr, (char *)ptr->data); string_list_free (vrepos); seaf_db_statement_query (mgr->seaf->db, "DELETE FROM VirtualRepo " "WHERE repo_id=? 
OR origin_repo=?", 2, "string", repo_id, "string", repo_id); out: g_free (orig_owner); return ret; } static gboolean get_owner (SeafDBRow *row, void *data) { char **owner_id = data; const char *owner = (const char *) seaf_db_row_get_column_text (row, 0); *owner_id = g_ascii_strdown (owner, -1); /* There should be only one result. */ return FALSE; } char * seaf_repo_manager_get_repo_owner (SeafRepoManager *mgr, const char *repo_id) { char *sql; char *ret = NULL; sql = "SELECT owner_id FROM RepoOwner WHERE repo_id=?"; if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_owner, &ret, 1, "string", repo_id) < 0) { seaf_warning ("Failed to get owner id for repo %s.\n", repo_id); return NULL; } return ret; } static gboolean collect_repo_id (SeafDBRow *row, void *data) { GList **p_ids = data; const char *repo_id; repo_id = seaf_db_row_get_column_text (row, 0); *p_ids = g_list_prepend (*p_ids, g_strdup(repo_id)); return TRUE; } GList * seaf_repo_manager_get_orphan_repo_list (SeafRepoManager *mgr) { GList *id_list = NULL, *ptr; GList *ret = NULL; char sql[256]; snprintf (sql, sizeof(sql), "SELECT Repo.repo_id FROM Repo LEFT JOIN " "RepoOwner ON Repo.repo_id = RepoOwner.repo_id WHERE " "RepoOwner.owner_id is NULL"); if (seaf_db_foreach_selected_row (mgr->seaf->db, sql, collect_repo_id, &id_list) < 0) return NULL; for (ptr = id_list; ptr; ptr = ptr->next) { char *repo_id = ptr->data; SeafRepo *repo = seaf_repo_manager_get_repo (mgr, repo_id); if (repo != NULL) ret = g_list_prepend (ret, repo); } string_list_free (id_list); return ret; } gboolean collect_repos_fill_size_commit (SeafDBRow *row, void *data) { GList **prepos = data; SeafRepo *repo; SeafBranch *head; const char *repo_id = seaf_db_row_get_column_text (row, 0); gint64 size = seaf_db_row_get_column_int64 (row, 1); const char *commit_id = seaf_db_row_get_column_text (row, 2); const char *repo_name = seaf_db_row_get_column_text (row, 3); gint64 update_time = seaf_db_row_get_column_int64 (row, 4); int version = 
seaf_db_row_get_column_int (row, 5);
    gboolean is_encrypted = seaf_db_row_get_column_int (row, 6) ? TRUE : FALSE;
    const char *last_modifier = seaf_db_row_get_column_text (row, 7);
    int status = seaf_db_row_get_column_int (row, 8);
    const char *type = seaf_db_row_get_column_text (row, 9);

    repo = seaf_repo_new (repo_id, NULL, NULL);
    if (!repo)
        return TRUE;

    /* A repo without a Branch row (no head commit) is marked corrupted. */
    if (!commit_id) {
        repo->is_corrupted = TRUE;
        goto out;
    }

    repo->size = size;
    /* The 11-column variant of the query also selects file_count. */
    if (seaf_db_row_get_column_count (row) == 11) {
        gint64 file_count = seaf_db_row_get_column_int64 (row, 10);
        repo->file_count = file_count;
    }
    head = seaf_branch_new ("master", repo_id, commit_id);
    repo->head = head;
    if (repo_name) {
        repo->name = g_strdup (repo_name);
        repo->last_modify = update_time;
        repo->version = version;
        repo->encrypted = is_encrypted;
        repo->last_modifier = g_strdup (last_modifier);
        repo->status = status;
    }
    if (type) {
        repo->type = g_strdup(type);
    }

out:
    *prepos = g_list_prepend (*prepos, repo);

    return TRUE;
}

/* List repos owned by @email, excluding virtual repos. @start/@limit of
 * -1/-1 means no paging. Missing name/modifier info is loaded from the
 * commit and cached back to the DB; corrupted repos are skipped unless
 * @ret_corrupted is set. Sets *@db_err (if non-NULL) on DB error. */
GList *
seaf_repo_manager_get_repos_by_owner (SeafRepoManager *mgr,
                                      const char *email,
                                      int ret_corrupted,
                                      int start,
                                      int limit,
                                      gboolean *db_err)
{
    GList *repo_list = NULL, *ptr;
    GList *ret = NULL;
    char *sql;
    SeafRepo *repo = NULL;
    int db_type = seaf_db_type(mgr->seaf->db);

    if (start == -1 && limit == -1) {
        if (db_type != SEAF_DB_TYPE_PGSQL)
            sql = "SELECT o.repo_id, s.size, b.commit_id, i.name, i.update_time, "
                "i.version, i.is_encrypted, i.last_modifier, i.status, i.type FROM "
                "RepoOwner o LEFT JOIN RepoSize s ON o.repo_id = s.repo_id "
                "LEFT JOIN Branch b ON o.repo_id = b.repo_id "
                "LEFT JOIN RepoInfo i ON o.repo_id = i.repo_id "
                "LEFT JOIN VirtualRepo v ON o.repo_id = v.repo_id "
                "WHERE owner_id=? AND "
                "v.repo_id IS NULL "
                "ORDER BY i.update_time DESC, o.repo_id";
        else
            sql = "SELECT o.repo_id, s.\"size\", b.commit_id, i.name, i.update_time, "
                "i.version, i.is_encrypted, i.last_modifier, i.status FROM "
                "RepoOwner o LEFT JOIN RepoSize s ON o.repo_id = s.repo_id "
                "LEFT JOIN Branch b ON o.repo_id = b.repo_id "
                "LEFT JOIN RepoInfo i ON o.repo_id = i.repo_id "
                "WHERE owner_id=? AND "
                "o.repo_id NOT IN (SELECT v.repo_id FROM VirtualRepo v) "
                "ORDER BY i.update_time DESC, o.repo_id";

        if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,
                                           collect_repos_fill_size_commit,
                                           &repo_list, 1, "string", email) < 0) {
            if (db_err)
                *db_err = TRUE;
            return NULL;
        }
    } else {
        if (db_type != SEAF_DB_TYPE_PGSQL)
            sql = "SELECT o.repo_id, s.size, b.commit_id, i.name, i.update_time, "
                "i.version, i.is_encrypted, i.last_modifier, i.status, i.type FROM "
                "RepoOwner o LEFT JOIN RepoSize s ON o.repo_id = s.repo_id "
                "LEFT JOIN Branch b ON o.repo_id = b.repo_id "
                "LEFT JOIN RepoInfo i ON o.repo_id = i.repo_id "
                "LEFT JOIN VirtualRepo v ON o.repo_id = v.repo_id "
                "WHERE owner_id=? AND "
                "v.repo_id IS NULL "
                "ORDER BY i.update_time DESC, o.repo_id "
                "LIMIT ? OFFSET ?";
        else
            sql = "SELECT o.repo_id, s.\"size\", b.commit_id, i.name, i.update_time, "
                "i.version, i.is_encrypted, i.last_modifier, i.status FROM "
                "RepoOwner o LEFT JOIN RepoSize s ON o.repo_id = s.repo_id "
                "LEFT JOIN Branch b ON o.repo_id = b.repo_id "
                "LEFT JOIN RepoInfo i ON o.repo_id = i.repo_id "
                "WHERE owner_id=? AND "
                "o.repo_id NOT IN (SELECT v.repo_id FROM VirtualRepo v) "
                "ORDER BY i.update_time DESC, o.repo_id "
                "LIMIT ? OFFSET ?";

        if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,
                                           collect_repos_fill_size_commit,
                                           &repo_list, 3, "string", email,
                                           "int", limit, "int", start) < 0) {
            if (db_err)
                *db_err = TRUE;
            return NULL;
        }
    }

    for (ptr = repo_list; ptr; ptr = ptr->next) {
        repo = ptr->data;
        if (ret_corrupted) {
            /* Keep corrupted repos in the result; still try to backfill
             * missing name/modifier from the head commit. */
            if (!repo->is_corrupted && (!repo->name || !repo->last_modifier)) {
                load_mini_repo (mgr, repo);
                if (!repo->is_corrupted)
                    set_repo_commit_to_db (repo->id, repo->name,
                                           repo->last_modify, repo->version,
                                           (repo->encrypted ? 1 : 0),
                                           repo->last_modifier);
            }
        } else {
            /* Drop corrupted repos from the result. */
            if (repo->is_corrupted) {
                seaf_repo_unref (repo);
                continue;
            }
            if (!repo->name || !repo->last_modifier) {
                load_mini_repo (mgr, repo);
                if (!repo->is_corrupted)
                    set_repo_commit_to_db (repo->id, repo->name,
                                           repo->last_modify, repo->version,
                                           (repo->encrypted ? 1 : 0),
                                           repo->last_modifier);
            }
            if (repo->is_corrupted) {
                seaf_repo_unref (repo);
                continue;
            }
        }
        if (repo != NULL)
            ret = g_list_prepend (ret, repo);
    }
    g_list_free (repo_list);

    return ret;
}

/* List repos whose id starts with @id_prefix (must be shorter than a full
 * 36-char UUID), excluding virtual repos. @start/@limit of -1/-1 means no
 * paging. */
GList *
seaf_repo_manager_get_repos_by_id_prefix (SeafRepoManager *mgr,
                                          const char *id_prefix,
                                          int start,
                                          int limit)
{
    GList *repo_list = NULL, *ptr;
    char *sql;
    SeafRepo *repo = NULL;
    int len = strlen(id_prefix);

    if (len >= 37)
        return NULL;

    int db_type = seaf_db_type(mgr->seaf->db);
    char *db_patt = g_strdup_printf ("%s%%", id_prefix);

    if (start == -1 && limit == -1) {
        if (db_type != SEAF_DB_TYPE_PGSQL)
            sql = "SELECT i.repo_id, s.size, b.commit_id, i.name, i.update_time, "
                "i.version, i.is_encrypted, i.last_modifier, i.status, i.type FROM "
                "RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id "
                "LEFT JOIN Branch b ON i.repo_id = b.repo_id "
                "LEFT JOIN VirtualRepo v ON i.repo_id = v.repo_id "
                "WHERE i.repo_id LIKE ? AND "
                "v.repo_id IS NULL "
                "ORDER BY i.update_time DESC, i.repo_id";
        else
            sql = "SELECT i.repo_id, s.\"size\", b.commit_id, i.name, i.update_time, "
                "i.version, i.is_encrypted, i.last_modifier, i.status FROM "
                "RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id "
                "LEFT JOIN Branch b ON i.repo_id = b.repo_id "
                "WHERE i.repo_id LIKE ? AND "
                "i.repo_id NOT IN (SELECT v.repo_id FROM VirtualRepo v) "
                "ORDER BY i.update_time DESC, i.repo_id";

        if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,
                                           collect_repos_fill_size_commit,
                                           &repo_list, 1, "string", db_patt) < 0) {
            g_free(db_patt);
            return NULL;
        }
    } else {
        if (db_type != SEAF_DB_TYPE_PGSQL)
            sql = "SELECT i.repo_id, s.size, b.commit_id, i.name, i.update_time, "
                "i.version, i.is_encrypted, i.last_modifier, i.status, i.type FROM "
                "RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id "
                "LEFT JOIN Branch b ON i.repo_id = b.repo_id "
                "LEFT JOIN VirtualRepo v ON i.repo_id = v.repo_id "
                "WHERE i.repo_id LIKE ? AND "
                "v.repo_id IS NULL "
                "ORDER BY i.update_time DESC, i.repo_id "
                "LIMIT ? OFFSET ?";
        else
            sql = "SELECT i.repo_id, s.\"size\", b.commit_id, i.name, i.update_time, "
                "i.version, i.is_encrypted, i.last_modifier, i.status FROM "
                "RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id "
                "LEFT JOIN Branch b ON i.repo_id = b.repo_id "
                "WHERE i.repo_id LIKE ? AND "
                "i.repo_id NOT IN (SELECT v.repo_id FROM VirtualRepo v) "
                "ORDER BY i.update_time DESC, i.repo_id "
                "LIMIT ? OFFSET ?";

        if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,
                                           collect_repos_fill_size_commit,
                                           &repo_list, 3, "string", db_patt,
                                           "int", limit, "int", start) < 0) {
            g_free(db_patt);
            return NULL;
        }
    }

    g_free(db_patt);

    return repo_list;
}

/* Case-insensitive substring search of repo names, excluding virtual repos
 * and repos deleted from the Repo table. */
GList *
seaf_repo_manager_search_repos_by_name (SeafRepoManager *mgr, const char *name)
{
    GList *repo_list = NULL;
    char *sql = NULL;
    char *db_patt = g_strdup_printf ("%%%s%%", name);

    switch (seaf_db_type(seaf->db)) {
    case SEAF_DB_TYPE_MYSQL:
        sql = "SELECT i.repo_id, s.size, b.commit_id, i.name, i.update_time, "
            "i.version, i.is_encrypted, i.last_modifier, i.status, i.type, fc.file_count FROM "
            "RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id "
            "LEFT JOIN Branch b ON i.repo_id = b.repo_id "
            "LEFT JOIN RepoFileCount fc ON i.repo_id = fc.repo_id "
            "LEFT JOIN Repo r ON i.repo_id = r.repo_id "
            "LEFT JOIN VirtualRepo v ON i.repo_id = v.repo_id "
            "WHERE i.name COLLATE UTF8_GENERAL_CI LIKE ? AND "
            "r.repo_id IS NOT NULL AND "
            "v.repo_id IS NULL "
            "ORDER BY i.update_time DESC, i.repo_id";
        break;
    case SEAF_DB_TYPE_PGSQL:
        sql = "SELECT i.repo_id, s.\"size\", b.commit_id, i.name, i.update_time, "
            "i.version, i.is_encrypted, i.last_modifier, i.status, fc.file_count FROM "
            "RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id "
            "LEFT JOIN Branch b ON i.repo_id = b.repo_id "
            "LEFT JOIN RepoFileCount fc ON i.repo_id = fc.repo_id "
            "WHERE i.name ILIKE ? AND "
            "i.repo_id IN (SELECT r.repo_id FROM Repo r) AND "
            "i.repo_id NOT IN (SELECT v.repo_id FROM VirtualRepo v) "
            "ORDER BY i.update_time DESC, i.repo_id";
        break;
    case SEAF_DB_TYPE_SQLITE:
        sql = "SELECT i.repo_id, s.size, b.commit_id, i.name, i.update_time, "
            "i.version, i.is_encrypted, i.last_modifier, i.status, i.type, fc.file_count FROM "
            "RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id "
            "LEFT JOIN Branch b ON i.repo_id = b.repo_id "
            "LEFT JOIN RepoFileCount fc ON i.repo_id = fc.repo_id "
            "LEFT JOIN Repo r ON i.repo_id = r.repo_id "
            "LEFT JOIN VirtualRepo v ON i.repo_id = v.repo_id "
            "WHERE i.name LIKE ? COLLATE NOCASE AND "
            "r.repo_id IS NOT NULL AND "
            "v.repo_id IS NULL "
            "ORDER BY i.update_time DESC, i.repo_id";
        break;
    default:
        g_free (db_patt);
        return NULL;
    }

    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,
                                       collect_repos_fill_size_commit,
                                       &repo_list, 1, "string", db_patt) < 0) {
        g_free (db_patt);
        return NULL;
    }
    g_free (db_patt);

    return repo_list;
}

/* List all repo ids in the Repo table. Caller owns the returned strings. */
GList *
seaf_repo_manager_get_repo_id_list (SeafRepoManager *mgr)
{
    GList *ret = NULL;
    char sql[256];

    snprintf (sql, 256, "SELECT repo_id FROM Repo");

    if (seaf_db_foreach_selected_row (mgr->seaf->db, sql,
                                      collect_repo_id, &ret) < 0)
        return NULL;

    return ret;
}

/* List repos with size/commit/info joined in, optionally paged
 * (@start/@limit of -1/-1 means no paging), ordered by @order_by
 * ("size", "file_count" or update time) and optionally including
 * virtual repos. */
GList *
seaf_repo_manager_get_repo_list (SeafRepoManager *mgr,
                                 int start, int limit,
                                 const char *order_by,
                                 int ret_virt_repo)
{
    GList *ret = NULL;
    int rc;
    GString *sql = g_string_new ("");

    if (start == -1 && limit == -1) {
        switch (seaf_db_type(mgr->seaf->db)) {
        case SEAF_DB_TYPE_MYSQL:
            g_string_append (sql,
                             "SELECT i.repo_id, s.size, b.commit_id, i.name, i.update_time, "
                             "i.version, i.is_encrypted, i.last_modifier, i.status, i.type, f.file_count FROM "
                             "RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id "
                             "LEFT JOIN Branch b ON i.repo_id = b.repo_id "
                             "LEFT JOIN RepoFileCount f ON i.repo_id = f.repo_id "
                             "LEFT JOIN Repo r ON i.repo_id = r.repo_id "
                             "LEFT JOIN VirtualRepo v ON i.repo_id = v.repo_id "
                             "WHERE r.repo_id IS NOT NULL ");
            if
(!ret_virt_repo)
                g_string_append_printf (sql, "AND v.repo_id IS NULL ");
            if (g_strcmp0 (order_by, "size") == 0)
                g_string_append_printf (sql, "ORDER BY s.size DESC, i.repo_id");
            else if (g_strcmp0 (order_by, "file_count") == 0)
                g_string_append_printf (sql, "ORDER BY f.file_count DESC, i.repo_id");
            else
                g_string_append_printf (sql, "ORDER BY i.update_time DESC, i.repo_id");
            break;
        case SEAF_DB_TYPE_SQLITE:
            g_string_append (sql,
                             "SELECT i.repo_id, s.size, b.commit_id, i.name, i.update_time, "
                             "i.version, i.is_encrypted, i.last_modifier, i.status, i.type, f.file_count FROM "
                             "RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id "
                             "LEFT JOIN Branch b ON i.repo_id = b.repo_id "
                             "LEFT JOIN RepoFileCount f ON i.repo_id = f.repo_id "
                             "LEFT JOIN Repo r ON i.repo_id = r.repo_id "
                             "LEFT JOIN VirtualRepo v ON i.repo_id = v.repo_id "
                             "WHERE r.repo_id IS NOT NULL ");
            if (!ret_virt_repo)
                g_string_append_printf (sql, "AND v.repo_id IS NULL ");
            if (g_strcmp0 (order_by, "size") == 0)
                g_string_append_printf (sql, "ORDER BY s.size DESC, i.repo_id");
            else if (g_strcmp0 (order_by, "file_count") == 0)
                g_string_append_printf (sql, "ORDER BY f.file_count DESC, i.repo_id");
            else
                g_string_append_printf (sql, "ORDER BY i.update_time DESC, i.repo_id");
            break;
        default:
            g_string_free (sql, TRUE);
            return NULL;
        }

        rc = seaf_db_statement_foreach_row (mgr->seaf->db, sql->str,
                                            collect_repos_fill_size_commit,
                                            &ret, 0);
    } else {
        /* Paged variant: same queries plus LIMIT/OFFSET parameters. */
        switch (seaf_db_type(mgr->seaf->db)) {
        case SEAF_DB_TYPE_MYSQL:
            g_string_append (sql,
                             "SELECT i.repo_id, s.size, b.commit_id, i.name, i.update_time, "
                             "i.version, i.is_encrypted, i.last_modifier, i.status, i.type, f.file_count FROM "
                             "RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id "
                             "LEFT JOIN Branch b ON i.repo_id = b.repo_id "
                             "LEFT JOIN RepoFileCount f ON i.repo_id = f.repo_id "
                             "LEFT JOIN Repo r ON i.repo_id = r.repo_id "
                             "LEFT JOIN VirtualRepo v ON i.repo_id = v.repo_id "
                             "WHERE r.repo_id IS NOT NULL ");
            if (!ret_virt_repo)
                g_string_append_printf (sql, "AND v.repo_id IS NULL ");
            if (g_strcmp0 (order_by, "size") == 0)
                g_string_append_printf (sql, "ORDER BY s.size DESC, i.repo_id LIMIT ? OFFSET ?");
            else if (g_strcmp0 (order_by, "file_count") == 0)
                g_string_append_printf (sql, "ORDER BY f.file_count DESC, i.repo_id LIMIT ? OFFSET ?");
            else
                g_string_append_printf (sql, "ORDER BY i.update_time DESC, i.repo_id LIMIT ? OFFSET ?");
            break;
        case SEAF_DB_TYPE_SQLITE:
            g_string_append (sql,
                             "SELECT i.repo_id, s.size, b.commit_id, i.name, i.update_time, "
                             "i.version, i.is_encrypted, i.last_modifier, i.status, i.type, f.file_count FROM "
                             "RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id "
                             "LEFT JOIN Branch b ON i.repo_id = b.repo_id "
                             "LEFT JOIN RepoFileCount f ON i.repo_id = f.repo_id "
                             "LEFT JOIN Repo r ON i.repo_id = r.repo_id "
                             "LEFT JOIN VirtualRepo v ON i.repo_id = v.repo_id "
                             "WHERE r.repo_id IS NOT NULL ");
            if (!ret_virt_repo)
                g_string_append_printf (sql, "AND v.repo_id IS NULL ");
            if (g_strcmp0 (order_by, "size") == 0)
                g_string_append_printf (sql, "ORDER BY s.size DESC, i.repo_id LIMIT ? OFFSET ?");
            else if (g_strcmp0 (order_by, "file_count") == 0)
                g_string_append_printf (sql, "ORDER BY f.file_count DESC, i.repo_id LIMIT ? OFFSET ?");
            else
                g_string_append_printf (sql, "ORDER BY i.update_time DESC, i.repo_id LIMIT ? OFFSET ?");
            break;
        default:
            g_string_free (sql, TRUE);
            return NULL;
        }

        rc = seaf_db_statement_foreach_row (mgr->seaf->db, sql->str,
                                            collect_repos_fill_size_commit,
                                            &ret, 2, "int", limit, "int", start);
    }

    g_string_free (sql, TRUE);

    if (rc < 0)
        return NULL;

    return g_list_reverse (ret);
}

/* Count rows in the Repo table; sets @error and returns negative on
 * DB error. */
gint64
seaf_repo_manager_count_repos (SeafRepoManager *mgr, GError **error)
{
    gint64 num = seaf_db_get_int64 (mgr->seaf->db,
                                    "SELECT COUNT(repo_id) FROM Repo");
    if (num < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Failed to count repos from db");
    }

    return num;
}

/* List the ids of repos owned by @email. Caller owns the returned strings;
 * NULL on DB error. */
GList *
seaf_repo_manager_get_repo_ids_by_owner (SeafRepoManager *mgr,
                                         const char *email)
{
    GList *ret = NULL;
    char *sql;

    sql = "SELECT repo_id FROM RepoOwner WHERE owner_id=?";
    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,
                                       collect_repo_id, &ret,
                                       1, "string", email) < 0) {
        string_list_free (ret);
        return NULL;
    }

    return ret;
}

/* Row callback: build a SeafileTrashRepo from a RepoTrash row; rows with
 * missing fields or an unresolvable head commit are skipped.
 * (Definition continues past the end of this view.) */
static gboolean
collect_trash_repo (SeafDBRow *row, void *data)
{
    GList **trash_repos = data;
    const char *repo_id;
    const char *repo_name;
    const char *head_id;
    const char *owner_id;
    gint64 size;
    gint64 del_time;

    repo_id = seaf_db_row_get_column_text (row, 0);
    repo_name = seaf_db_row_get_column_text (row, 1);
    head_id = seaf_db_row_get_column_text (row, 2);
    owner_id = seaf_db_row_get_column_text (row, 3);
    size = seaf_db_row_get_column_int64 (row, 4);
    del_time = seaf_db_row_get_column_int64 (row, 5);

    if (!repo_id || !repo_name || !head_id || !owner_id)
        return TRUE;

    SeafileTrashRepo *trash_repo = g_object_new (SEAFILE_TYPE_TRASH_REPO,
                                                 "repo_id", repo_id,
                                                 "repo_name", repo_name,
                                                 "head_id", head_id,
                                                 "owner_id", owner_id,
                                                 "size", size,
                                                 "del_time", del_time,
                                                 NULL);
    if (!trash_repo)
        return FALSE;

    SeafCommit *commit = seaf_commit_manager_get_commit_compatible (seaf->commit_mgr,
                                                                    repo_id, head_id);
    if (!commit) {
        seaf_warning ("Commit %s not found in repo %s\n", head_id, repo_id);
        g_object_unref (trash_repo);
        return TRUE;
    }
    g_object_set (trash_repo, "encrypted", commit->encrypted, NULL);
seaf_commit_unref (commit); *trash_repos = g_list_prepend (*trash_repos, trash_repo); return TRUE; } GList * seaf_repo_manager_get_trash_repo_list (SeafRepoManager *mgr, int start, int limit, GError **error) { GList *trash_repos = NULL; int rc; if (start == -1 && limit == -1) rc = seaf_db_statement_foreach_row (mgr->seaf->db, "SELECT repo_id, repo_name, head_id, owner_id, " "size, del_time FROM RepoTrash ORDER BY del_time DESC", collect_trash_repo, &trash_repos, 0); else rc = seaf_db_statement_foreach_row (mgr->seaf->db, "SELECT repo_id, repo_name, head_id, owner_id, " "size, del_time FROM RepoTrash " "ORDER BY del_time DESC LIMIT ? OFFSET ?", collect_trash_repo, &trash_repos, 2, "int", limit, "int", start); if (rc < 0) { while (trash_repos) { g_object_unref (trash_repos->data); trash_repos = g_list_delete_link (trash_repos, trash_repos); } g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to get trashed repo from db."); return NULL; } return g_list_reverse (trash_repos); } GList * seaf_repo_manager_get_trash_repos_by_owner (SeafRepoManager *mgr, const char *owner, GError **error) { GList *trash_repos = NULL; int rc; rc = seaf_db_statement_foreach_row (mgr->seaf->db, "SELECT repo_id, repo_name, head_id, owner_id, " "size, del_time FROM RepoTrash WHERE owner_id = ?", collect_trash_repo, &trash_repos, 1, "string", owner); if (rc < 0) { while (trash_repos) { g_object_unref (trash_repos->data); trash_repos = g_list_delete_link (trash_repos, trash_repos); } g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to get trashed repo from db."); return NULL; } return trash_repos; } SeafileTrashRepo * seaf_repo_manager_get_repo_from_trash (SeafRepoManager *mgr, const char *repo_id) { SeafileTrashRepo *ret = NULL; GList *trash_repos = NULL; char *sql; int rc; sql = "SELECT repo_id, repo_name, head_id, owner_id, size, del_time FROM RepoTrash " "WHERE repo_id = ?"; rc = seaf_db_statement_foreach_row (mgr->seaf->db, sql, collect_trash_repo, &trash_repos, 1, 
"string", repo_id); if (rc < 0) return NULL; /* There should be only one results, since repo_id is a PK. */ if (trash_repos) ret = trash_repos->data; g_list_free (trash_repos); return ret; } int seaf_repo_manager_del_repo_from_trash (SeafRepoManager *mgr, const char *repo_id, GError **error) { /* As long as the repo is successfully moved into GarbageRepo table, * we consider this operation successful. */ if (add_deleted_repo_record (mgr, repo_id) < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "DB error: Add deleted record"); return -1; } seaf_db_statement_query (mgr->seaf->db, "DELETE FROM RepoFileCount WHERE repo_id = ?", 1, "string", repo_id); seaf_db_statement_query (mgr->seaf->db, "DELETE FROM RepoTrash WHERE repo_id = ?", 1, "string", repo_id); seaf_db_statement_query (mgr->seaf->db, "DELETE FROM RepoInfo WHERE repo_id = ?", 1, "string", repo_id); return 0; } int seaf_repo_manager_empty_repo_trash (SeafRepoManager *mgr, GError **error) { GList *trash_repos = NULL, *ptr; SeafileTrashRepo *repo; trash_repos = seaf_repo_manager_get_trash_repo_list (mgr, -1, -1, error); if (*error) return -1; for (ptr = trash_repos; ptr; ptr = ptr->next) { repo = ptr->data; seaf_repo_manager_del_repo_from_trash (mgr, seafile_trash_repo_get_repo_id(repo), NULL); g_object_unref (repo); } g_list_free (trash_repos); return 0; } int seaf_repo_manager_empty_repo_trash_by_owner (SeafRepoManager *mgr, const char *owner, GError **error) { GList *trash_repos = NULL, *ptr; SeafileTrashRepo *repo; trash_repos = seaf_repo_manager_get_trash_repos_by_owner (mgr, owner, error); if (*error) return -1; for (ptr = trash_repos; ptr; ptr = ptr->next) { repo = ptr->data; seaf_repo_manager_del_repo_from_trash (mgr, seafile_trash_repo_get_repo_id(repo), NULL); g_object_unref (repo); } g_list_free (trash_repos); return 0; } int seaf_repo_manager_restore_repo_from_trash (SeafRepoManager *mgr, const char *repo_id, GError **error) { SeafileTrashRepo *repo = NULL; int ret = 0; gboolean exists = 
FALSE; gboolean db_err; const char *head_id = NULL; SeafCommit *commit = NULL; repo = seaf_repo_manager_get_repo_from_trash (mgr, repo_id); if (!repo) { seaf_warning ("Repo %.8s not found in trash.\n", repo_id); return -1; } SeafDBTrans *trans = seaf_db_begin_transaction (mgr->seaf->db); exists = seaf_db_trans_check_for_existence (trans, "SELECT 1 FROM Repo WHERE repo_id=?", &db_err, 1, "string", repo_id); if (!exists) { ret = seaf_db_trans_query (trans, "INSERT INTO Repo(repo_id) VALUES (?)", 1, "string", repo_id) < 0; if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "DB error: Insert Repo."); seaf_db_rollback (trans); seaf_db_trans_close (trans); goto out; } } exists = seaf_db_trans_check_for_existence (trans, "SELECT 1 FROM RepoOwner WHERE repo_id=?", &db_err, 1, "string", repo_id); if (!exists) { ret = seaf_db_trans_query (trans, "INSERT INTO RepoOwner (repo_id, owner_id) VALUES (?, ?)", 2, "string", repo_id, "string", seafile_trash_repo_get_owner_id(repo)); if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "DB error: Insert Repo Owner."); seaf_db_rollback (trans); seaf_db_trans_close (trans); goto out; } } exists = seaf_db_trans_check_for_existence (trans, "SELECT 1 FROM Branch WHERE repo_id=?", &db_err, 1, "string", repo_id); if (!exists) { ret = seaf_db_trans_query (trans, "INSERT INTO Branch (name, repo_id, commit_id) VALUES ('master', ?, ?)", 2, "string", repo_id, "string", seafile_trash_repo_get_head_id(repo)); if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "DB error: Insert Branch."); seaf_db_rollback (trans); seaf_db_trans_close (trans); goto out; } } exists = seaf_db_trans_check_for_existence (trans, "SELECT 1 FROM RepoHead WHERE repo_id=?", &db_err, 1, "string", repo_id); if (!exists) { ret = seaf_db_trans_query (trans, "INSERT INTO RepoHead (repo_id, branch_name) VALUES (?, 'master')", 1, "string", repo_id); if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "DB error: Set 
RepoHead."); seaf_db_rollback (trans); seaf_db_trans_close (trans); goto out; } } // Restore repo size exists = seaf_db_trans_check_for_existence (trans, "SELECT 1 FROM RepoSize WHERE repo_id=?", &db_err, 1, "string", repo_id); if (!exists) { ret = seaf_db_trans_query (trans, "INSERT INTO RepoSize (repo_id, size, head_id) VALUES (?, ?, ?)", 3, "string", repo_id, "int64", seafile_trash_repo_get_size (repo), "string", seafile_trash_repo_get_head_id (repo)); if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "DB error: Insert Repo Size."); seaf_db_rollback (trans); seaf_db_trans_close (trans); goto out; } } // Restore repo info exists = seaf_db_trans_check_for_existence (trans, "SELECT 1 FROM RepoInfo WHERE repo_id=?", &db_err, 1, "string", repo_id); if (!exists) { head_id = seafile_trash_repo_get_head_id (repo); commit = seaf_commit_manager_get_commit_compatible (seaf->commit_mgr, repo_id, head_id); if (!commit) { seaf_warning ("Commit %.8s of repo %.8s not found.\n", repo_id, head_id); seaf_db_rollback (trans); seaf_db_trans_close (trans); ret = -1; goto out; } ret = seaf_db_trans_query (trans, "INSERT INTO RepoInfo (repo_id, name, update_time, version, is_encrypted, last_modifier) VALUES (?, ?, ?, ?, ?, ?)", 6, "string", repo_id, "string", seafile_trash_repo_get_repo_name (repo), "int64", commit->ctime, "int", commit->version, "int", commit->encrypted, "string", commit->creator_name); if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "DB error: Insert Repo Info."); seaf_db_rollback (trans); seaf_db_trans_close (trans); goto out; } } ret = seaf_db_trans_query (trans, "DELETE FROM RepoTrash WHERE repo_id = ?", 1, "string", repo_id); if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "DB error: delete from RepoTrash."); seaf_db_rollback (trans); seaf_db_trans_close (trans); goto out; } if (seaf_db_commit (trans) < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "DB error: Failed to commit."); 
seaf_db_rollback (trans); ret = -1; } seaf_db_trans_close (trans); out: seaf_commit_unref (commit); g_object_unref (repo); return ret; } /* Web access permission. */ int seaf_repo_manager_set_access_property (SeafRepoManager *mgr, const char *repo_id, const char *ap) { int rc; if (seaf_repo_manager_query_access_property (mgr, repo_id) == NULL) { rc = seaf_db_statement_query (mgr->seaf->db, "INSERT INTO WebAP (repo_id, access_property) VALUES (?, ?)", 2, "string", repo_id, "string", ap); } else { rc = seaf_db_statement_query (mgr->seaf->db, "UPDATE WebAP SET access_property=? " "WHERE repo_id=?", 2, "string", ap, "string", repo_id); } if (rc < 0) { seaf_warning ("DB error when set access property for repo %s, %s.\n", repo_id, ap); return -1; } return 0; } static gboolean get_ap (SeafDBRow *row, void *data) { char **ap = data; *ap = g_strdup (seaf_db_row_get_column_text (row, 0)); return FALSE; } char * seaf_repo_manager_query_access_property (SeafRepoManager *mgr, const char *repo_id) { char *sql; char *ret = NULL; sql = "SELECT access_property FROM WebAP WHERE repo_id=?"; if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_ap, &ret, 1, "string", repo_id) < 0) { seaf_warning ("DB error when get access property for repo %s.\n", repo_id); return NULL; } return ret; } /* Group repos. */ int seaf_repo_manager_add_group_repo (SeafRepoManager *mgr, const char *repo_id, int group_id, const char *owner, const char *permission, GError **error) { if (seaf_db_statement_query (mgr->seaf->db, "INSERT INTO RepoGroup (repo_id, group_id, user_name, permission) VALUES (?, ?, ?, ?)", 4, "string", repo_id, "int", group_id, "string", owner, "string", permission) < 0) return -1; return 0; } int seaf_repo_manager_del_group_repo (SeafRepoManager *mgr, const char *repo_id, int group_id, GError **error) { return seaf_db_statement_query (mgr->seaf->db, "DELETE FROM RepoGroup WHERE group_id=? 
" "AND repo_id=?", 2, "int", group_id, "string", repo_id); } static gboolean get_group_ids_cb (SeafDBRow *row, void *data) { GList **plist = data; int group_id = seaf_db_row_get_column_int (row, 0); *plist = g_list_prepend (*plist, (gpointer)(long)group_id); return TRUE; } GList * seaf_repo_manager_get_groups_by_repo (SeafRepoManager *mgr, const char *repo_id, GError **error) { char *sql; GList *group_ids = NULL; sql = "SELECT group_id FROM RepoGroup WHERE repo_id = ?"; if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_group_ids_cb, &group_ids, 1, "string", repo_id) < 0) { g_list_free (group_ids); return NULL; } return g_list_reverse (group_ids); } static gboolean get_group_perms_cb (SeafDBRow *row, void *data) { GList **plist = data; GroupPerm *perm = g_new0 (GroupPerm, 1); perm->group_id = seaf_db_row_get_column_int (row, 0); const char *permission = seaf_db_row_get_column_text(row, 1); g_strlcpy (perm->permission, permission, sizeof(perm->permission)); *plist = g_list_prepend (*plist, perm); return TRUE; } GList * seaf_repo_manager_get_group_perm_by_repo (SeafRepoManager *mgr, const char *repo_id, GError **error) { char *sql; GList *group_perms = NULL, *p; sql = "SELECT group_id, permission FROM RepoGroup WHERE repo_id = ?"; if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_group_perms_cb, &group_perms, 1, "string", repo_id) < 0) { for (p = group_perms; p != NULL; p = p->next) g_free (p->data); g_list_free (group_perms); return NULL; } return g_list_reverse (group_perms); } int seaf_repo_manager_set_group_repo_perm (SeafRepoManager *mgr, const char *repo_id, int group_id, const char *permission, GError **error) { return seaf_db_statement_query (mgr->seaf->db, "UPDATE RepoGroup SET permission=? WHERE " "repo_id=? 
AND group_id=?", 3, "string", permission, "string", repo_id, "int", group_id); } int seaf_repo_manager_set_subdir_group_perm_by_path (SeafRepoManager *mgr, const char *repo_id, const char *username, int group_id, const char *permission, const char *path) { return seaf_db_statement_query (mgr->seaf->db, "UPDATE RepoGroup SET permission=? WHERE repo_id IN " "(SELECT repo_id FROM VirtualRepo WHERE origin_repo=? AND path=?) " "AND group_id=? AND user_name=?", 5, "string", permission, "string", repo_id, "string", path, "int", group_id, "string", username); } static gboolean get_group_repoids_cb (SeafDBRow *row, void *data) { GList **p_list = data; char *repo_id = g_strdup ((const char *)seaf_db_row_get_column_text (row, 0)); *p_list = g_list_prepend (*p_list, repo_id); return TRUE; } GList * seaf_repo_manager_get_group_repoids (SeafRepoManager *mgr, int group_id, GError **error) { char *sql; GList *repo_ids = NULL; sql = "SELECT repo_id FROM RepoGroup WHERE group_id = ?"; if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_group_repoids_cb, &repo_ids, 1, "int", group_id) < 0) return NULL; return g_list_reverse (repo_ids); } static gboolean get_group_repos_cb (SeafDBRow *row, void *data) { GList **p_list = data; SeafileRepo *srepo = NULL; const char *repo_id = seaf_db_row_get_column_text (row, 0); const char *vrepo_id = seaf_db_row_get_column_text (row, 1); int group_id = seaf_db_row_get_column_int (row, 2); const char *user_name = seaf_db_row_get_column_text (row, 3); const char *permission = seaf_db_row_get_column_text (row, 4); const char *commit_id = seaf_db_row_get_column_text (row, 5); gint64 size = seaf_db_row_get_column_int64 (row, 6); const char *repo_name = seaf_db_row_get_column_text (row, 9); gint64 update_time = seaf_db_row_get_column_int64 (row, 10); int version = seaf_db_row_get_column_int (row, 11); gboolean is_encrypted = seaf_db_row_get_column_int (row, 12) ? 
TRUE : FALSE; const char *last_modifier = seaf_db_row_get_column_text (row, 13); int status = seaf_db_row_get_column_int (row, 14); const char *type = seaf_db_row_get_column_text (row, 15); char *user_name_l = g_ascii_strdown (user_name, -1); srepo = g_object_new (SEAFILE_TYPE_REPO, "share_type", "group", "repo_id", repo_id, "id", repo_id, "head_cmmt_id", commit_id, "group_id", group_id, "user", user_name_l, "permission", permission, "is_virtual", (vrepo_id != NULL), "size", size, "status", status, NULL); g_free (user_name_l); if (srepo != NULL) { if (vrepo_id) { const char *origin_repo_id = seaf_db_row_get_column_text (row, 7); const char *origin_path = seaf_db_row_get_column_text (row, 8); const char *origin_repo_name = seaf_db_row_get_column_text (row, 16); g_object_set (srepo, "store_id", origin_repo_id, "origin_repo_id", origin_repo_id, "origin_repo_name", origin_repo_name, "origin_path", origin_path, NULL); } else { g_object_set (srepo, "store_id", repo_id, NULL); } if (repo_name) { g_object_set (srepo, "name", repo_name, "repo_name", repo_name, "last_modify", update_time, "last_modified", update_time, "version", version, "encrypted", is_encrypted, "last_modifier", last_modifier, NULL); } if (type) { g_object_set (srepo, "repo_type", type, NULL); } *p_list = g_list_prepend (*p_list, srepo); } return TRUE; } void seaf_fill_repo_obj_from_commit (GList **repos) { SeafileRepo *repo; SeafCommit *commit; char *repo_id; char *commit_id; char *repo_name = NULL; char *last_modifier = NULL; GList *p = *repos; GList *next; while (p) { repo = p->data; g_object_get (repo, "name", &repo_name, NULL); g_object_get (repo, "last_modifier", &last_modifier, NULL); if (!repo_name || !last_modifier) { g_object_get (repo, "repo_id", &repo_id, "head_cmmt_id", &commit_id, NULL); commit = seaf_commit_manager_get_commit_compatible (seaf->commit_mgr, repo_id, commit_id); if (!commit) { seaf_warning ("Commit %s not found in repo %s\n", commit_id, repo_id); g_object_unref (repo); next = 
p->next; *repos = g_list_delete_link (*repos, p); p = next; if (repo_name) g_free (repo_name); if (last_modifier) g_free (last_modifier); } else { g_object_set (repo, "name", commit->repo_name, "repo_name", commit->repo_name, "last_modify", commit->ctime, "last_modified", commit->ctime, "version", commit->version, "encrypted", commit->encrypted, "last_modifier", commit->creator_name, NULL); /* Set to database */ set_repo_commit_to_db (repo_id, commit->repo_name, commit->ctime, commit->version, commit->encrypted, commit->creator_name); seaf_commit_unref (commit); } g_free (repo_id); g_free (commit_id); } if (repo_name) g_free (repo_name); if (last_modifier) g_free (last_modifier); p = p->next; } } GList * seaf_repo_manager_get_repos_by_group (SeafRepoManager *mgr, int group_id, GError **error) { char *sql; GList *repos = NULL; GList *p; sql = "SELECT RepoGroup.repo_id, v.repo_id, " "group_id, user_name, permission, commit_id, s.size, " "v.origin_repo, v.path, i.name, " "i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, i2.name " "FROM RepoGroup LEFT JOIN VirtualRepo v ON " "RepoGroup.repo_id = v.repo_id " "LEFT JOIN RepoInfo i ON RepoGroup.repo_id = i.repo_id " "LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id " "LEFT JOIN RepoSize s ON RepoGroup.repo_id = s.repo_id, " "Branch WHERE group_id = ? 
AND " "RepoGroup.repo_id = Branch.repo_id AND " "Branch.name = 'master'"; if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_group_repos_cb, &repos, 1, "int", group_id) < 0) { for (p = repos; p; p = p->next) { g_object_unref (p->data); } g_list_free (repos); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to get repos by group from db."); return NULL; } seaf_fill_repo_obj_from_commit (&repos); return g_list_reverse (repos); } GList * seaf_repo_manager_get_group_repos_by_owner (SeafRepoManager *mgr, const char *owner, GError **error) { char *sql; GList *repos = NULL; GList *p; sql = "SELECT RepoGroup.repo_id, v.repo_id, " "group_id, user_name, permission, commit_id, s.size, " "v.origin_repo, v.path, i.name, " "i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, i2.name " "FROM RepoGroup LEFT JOIN VirtualRepo v ON " "RepoGroup.repo_id = v.repo_id " "LEFT JOIN RepoInfo i ON RepoGroup.repo_id = i.repo_id " "LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id " "LEFT JOIN RepoSize s ON RepoGroup.repo_id = s.repo_id, " "Branch WHERE user_name = ? AND " "RepoGroup.repo_id = Branch.repo_id AND " "Branch.name = 'master'"; if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_group_repos_cb, &repos, 1, "string", owner) < 0) { for (p = repos; p; p = p->next) { g_object_unref (p->data); } g_list_free (repos); return NULL; } seaf_fill_repo_obj_from_commit (&repos); return g_list_reverse (repos); } static gboolean get_group_repo_owner (SeafDBRow *row, void *data) { char **share_from = data; const char *owner = (const char *) seaf_db_row_get_column_text (row, 0); *share_from = g_ascii_strdown (owner, -1); /* There should be only one result. 
*/ return FALSE; } char * seaf_repo_manager_get_group_repo_owner (SeafRepoManager *mgr, const char *repo_id, GError **error) { char *sql; char *ret = NULL; sql = "SELECT user_name FROM RepoGroup WHERE repo_id = ?"; if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_group_repo_owner, &ret, 1, "string", repo_id) < 0) { seaf_warning ("DB error when get repo share from for repo %s.\n", repo_id); return NULL; } return ret; } int seaf_repo_manager_remove_group_repos (SeafRepoManager *mgr, int group_id, const char *owner, GError **error) { SeafDB *db = mgr->seaf->db; int rc; if (!owner) { rc = seaf_db_statement_query (db, "DELETE FROM RepoGroup WHERE group_id=?", 1, "int", group_id); } else { rc = seaf_db_statement_query (db, "DELETE FROM RepoGroup WHERE group_id=? AND " "user_name = ?", 2, "int", group_id, "string", owner); } return rc; } /* Inner public repos */ int seaf_repo_manager_set_inner_pub_repo (SeafRepoManager *mgr, const char *repo_id, const char *permission) { SeafDB *db = mgr->seaf->db; char sql[256]; if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) { gboolean err; snprintf(sql, sizeof(sql), "SELECT repo_id FROM InnerPubRepo WHERE repo_id=?"); if (seaf_db_statement_exists (db, sql, &err, 1, "string", repo_id)) snprintf(sql, sizeof(sql), "UPDATE InnerPubRepo SET permission='%s' " "WHERE repo_id='%s'", permission, repo_id); else snprintf(sql, sizeof(sql), "INSERT INTO InnerPubRepo (repo_id, permission) VALUES " "('%s', '%s')", repo_id, permission); if (err) return -1; return seaf_db_query (db, sql); } else { return seaf_db_statement_query (db, "REPLACE INTO InnerPubRepo (repo_id, permission) VALUES (?, ?)", 2, "string", repo_id, "string", permission); } return -1; } int seaf_repo_manager_unset_inner_pub_repo (SeafRepoManager *mgr, const char *repo_id) { return seaf_db_statement_query (mgr->seaf->db, "DELETE FROM InnerPubRepo WHERE repo_id = ?", 1, "string", repo_id); } gboolean seaf_repo_manager_is_inner_pub_repo (SeafRepoManager *mgr, const char *repo_id) 
{ gboolean db_err = FALSE; return seaf_db_statement_exists (mgr->seaf->db, "SELECT repo_id FROM InnerPubRepo WHERE repo_id=?", &db_err, 1, "string", repo_id); } static gboolean collect_public_repos (SeafDBRow *row, void *data) { GList **ret = (GList **)data; SeafileRepo *srepo; const char *repo_id, *vrepo_id, *owner, *permission, *commit_id; gint64 size; repo_id = seaf_db_row_get_column_text (row, 0); vrepo_id = seaf_db_row_get_column_text (row, 1); owner = seaf_db_row_get_column_text (row, 2); permission = seaf_db_row_get_column_text (row, 3); commit_id = seaf_db_row_get_column_text (row, 4); size = seaf_db_row_get_column_int64 (row, 5); const char *repo_name = seaf_db_row_get_column_text (row, 8); gint64 update_time = seaf_db_row_get_column_int64 (row, 9); int version = seaf_db_row_get_column_int (row, 10); gboolean is_encrypted = seaf_db_row_get_column_int (row, 11) ? TRUE : FALSE; const char *last_modifier = seaf_db_row_get_column_text (row, 12); int status = seaf_db_row_get_column_int (row, 13); const char *type = seaf_db_row_get_column_text (row, 14); char *owner_l = g_ascii_strdown (owner, -1); srepo = g_object_new (SEAFILE_TYPE_REPO, "share_type", "public", "repo_id", repo_id, "id", repo_id, "head_cmmt_id", commit_id, "permission", permission, "user", owner_l, "is_virtual", (vrepo_id != NULL), "size", size, "status", status, NULL); g_free (owner_l); if (srepo) { if (vrepo_id) { const char *origin_repo_id = seaf_db_row_get_column_text (row, 6); const char *origin_path = seaf_db_row_get_column_text (row, 7); g_object_set (srepo, "store_id", origin_repo_id, "origin_repo_id", origin_repo_id, "origin_path", origin_path, NULL); } else { g_object_set (srepo, "store_id", repo_id, NULL); } if (repo_name) { g_object_set (srepo, "name", repo_name, "repo_name", repo_name, "last_modify", update_time, "last_modified", update_time, "version", version, "encrypted", is_encrypted, "last_modifier", last_modifier, NULL); } if (type) { g_object_set (srepo, "repo_type", type, 
NULL); } *ret = g_list_prepend (*ret, srepo); } return TRUE; } GList * seaf_repo_manager_list_inner_pub_repos (SeafRepoManager *mgr, gboolean *db_err) { GList *ret = NULL, *p; char *sql; sql = "SELECT InnerPubRepo.repo_id, VirtualRepo.repo_id, " "owner_id, permission, commit_id, s.size, " "VirtualRepo.origin_repo, VirtualRepo.path, i.name, " "i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type " "FROM InnerPubRepo LEFT JOIN VirtualRepo ON " "InnerPubRepo.repo_id=VirtualRepo.repo_id " "LEFT JOIN RepoInfo i ON InnerPubRepo.repo_id = i.repo_id " "LEFT JOIN RepoSize s ON InnerPubRepo.repo_id = s.repo_id, RepoOwner, Branch " "WHERE InnerPubRepo.repo_id=RepoOwner.repo_id AND " "InnerPubRepo.repo_id = Branch.repo_id AND Branch.name = 'master'"; if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, collect_public_repos, &ret, 0) < 0) { for (p = ret; p != NULL; p = p->next) g_object_unref (p->data); g_list_free (ret); if (db_err) *db_err = TRUE; return NULL; } seaf_fill_repo_obj_from_commit (&ret); return g_list_reverse (ret); } gint64 seaf_repo_manager_count_inner_pub_repos (SeafRepoManager *mgr) { char sql[256]; snprintf (sql, 256, "SELECT COUNT(*) FROM InnerPubRepo"); return seaf_db_get_int64(mgr->seaf->db, sql); } GList * seaf_repo_manager_list_inner_pub_repos_by_owner (SeafRepoManager *mgr, const char *user) { GList *ret = NULL, *p; char *sql; sql = "SELECT InnerPubRepo.repo_id, VirtualRepo.repo_id, " "owner_id, permission, commit_id, s.size, " "VirtualRepo.origin_repo, VirtualRepo.path, i.name, " "i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type " "FROM InnerPubRepo LEFT JOIN VirtualRepo ON " "InnerPubRepo.repo_id=VirtualRepo.repo_id " "LEFT JOIN RepoInfo i ON InnerPubRepo.repo_id = i.repo_id " "LEFT JOIN RepoSize s ON InnerPubRepo.repo_id = s.repo_id, RepoOwner, Branch " "WHERE InnerPubRepo.repo_id=RepoOwner.repo_id AND owner_id=? 
" "AND InnerPubRepo.repo_id = Branch.repo_id AND Branch.name = 'master'"; if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, collect_public_repos, &ret, 1, "string", user) < 0) { for (p = ret; p != NULL; p = p->next) g_object_unref (p->data); g_list_free (ret); return NULL; } seaf_fill_repo_obj_from_commit (&ret); return g_list_reverse (ret); } char * seaf_repo_manager_get_inner_pub_repo_perm (SeafRepoManager *mgr, const char *repo_id) { char *sql; sql = "SELECT permission FROM InnerPubRepo WHERE repo_id=?"; return seaf_db_statement_get_string(mgr->seaf->db, sql, 1, "string", repo_id); } int seaf_repo_manager_is_valid_filename (SeafRepoManager *mgr, const char *repo_id, const char *filename, GError **error) { if (should_ignore_file(filename, NULL)) return 0; else return 1; } typedef struct _RepoCryptCompat { const char *magic; const char *pwd_hash; const char *pwd_hash_algo; const char *pwd_hash_params; } RepoCryptInfo; static RepoCryptInfo * repo_crypt_info_new (const char *magic, const char *pwd_hash, const char *algo, const char *params) { RepoCryptInfo *crypt_info = g_new0 (RepoCryptInfo, 1); crypt_info->magic = magic; crypt_info->pwd_hash = pwd_hash; crypt_info->pwd_hash_algo = algo; crypt_info->pwd_hash_params = params; return crypt_info; } static int create_repo_common (SeafRepoManager *mgr, const char *repo_id, const char *repo_name, const char *repo_desc, const char *user, const char *random_key, const char *salt, int enc_version, RepoCryptInfo *crypt_info, GError **error) { SeafRepo *repo = NULL; SeafCommit *commit = NULL; SeafBranch *master = NULL; int ret = -1; if (enc_version != 4 && enc_version != 3 && enc_version != 2 && enc_version != -1) { seaf_warning ("Unsupported enc version %d.\n", enc_version); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Unsupported encryption version"); return -1; } if (crypt_info && crypt_info->pwd_hash_algo) { if (g_strcmp0 (crypt_info->pwd_hash_algo, PWD_HASH_PDKDF2) != 0 && g_strcmp0 
(crypt_info->pwd_hash_algo, PWD_HASH_ARGON2ID) !=0) { seaf_warning ("Unsupported enc algothrims %s.\n", crypt_info->pwd_hash_algo); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Unsupported encryption algothrims"); return -1; } if (!crypt_info->pwd_hash || strlen(crypt_info->pwd_hash) != 64) { seaf_warning ("Bad pwd_hash.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad pwd_hash"); return -1; } } if (enc_version >= 2) { if (!crypt_info->pwd_hash_algo && (!crypt_info->magic || strlen(crypt_info->magic) != 64)) { seaf_warning ("Bad magic.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad magic"); return -1; } if (!random_key || strlen(random_key) != 96) { seaf_warning ("Bad random key.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad random key"); return -1; } } if (enc_version >= 3) { if (!salt || strlen(salt) != 64) { seaf_warning ("Bad salt.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad salt"); return -1; } } repo = seaf_repo_new (repo_id, repo_name, repo_desc); repo->no_local_history = TRUE; if (enc_version >= 2) { repo->encrypted = TRUE; repo->enc_version = enc_version; if (!crypt_info->pwd_hash_algo) memcpy (repo->magic, crypt_info->magic, 64); memcpy (repo->random_key, random_key, 96); } if (enc_version >= 3) memcpy (repo->salt, salt, 64); if (crypt_info && crypt_info->pwd_hash_algo) { // set pwd_hash fields here. 
memcpy (repo->pwd_hash, crypt_info->pwd_hash, 64); repo->pwd_hash_algo = g_strdup (crypt_info->pwd_hash_algo); repo->pwd_hash_params = g_strdup (crypt_info->pwd_hash_params); } repo->version = CURRENT_REPO_VERSION; memcpy (repo->store_id, repo_id, 36); commit = seaf_commit_new (NULL, repo->id, EMPTY_SHA1, /* root id */ user, /* creator */ EMPTY_SHA1, /* creator id */ "Created library", /* description */ 0); /* ctime */ seaf_repo_to_commit (repo, commit); if (seaf_commit_manager_add_commit (seaf->commit_mgr, commit) < 0) { seaf_warning ("Failed to add commit.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to add commit"); goto out; } master = seaf_branch_new ("master", repo->id, commit->commit_id); if (seaf_branch_manager_add_branch (seaf->branch_mgr, master) < 0) { seaf_warning ("Failed to add branch.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to add branch"); goto out; } if (seaf_repo_set_head (repo, master) < 0) { seaf_warning ("Failed to set repo head.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to set repo head."); goto out; } if (seaf_repo_manager_add_repo (mgr, repo) < 0) { seaf_warning ("Failed to add repo.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to add repo."); goto out; } seaf_repo_manager_update_repo_info (mgr, repo->id, repo->head->commit_id); ret = 0; out: if (repo) seaf_repo_unref (repo); if (commit) seaf_commit_unref (commit); if (master) seaf_branch_unref (master); return ret; } char * seaf_repo_manager_create_new_repo (SeafRepoManager *mgr, const char *repo_name, const char *repo_desc, const char *owner_email, const char *passwd, int enc_version, const char *pwd_hash_algo, const char *pwd_hash_params, GError **error) { char *repo_id = NULL; char salt[65], magic[65], pwd_hash[65], random_key[97]; const char *algo = pwd_hash_algo; const char *params = pwd_hash_params; repo_id = gen_uuid (); if (passwd && passwd[0] != 0) { if (seafile_generate_repo_salt (salt) 
< 0) { goto bad; } if (algo != NULL) { seafile_generate_pwd_hash (enc_version, repo_id, passwd, salt, algo, params, pwd_hash); } else { seafile_generate_magic (enc_version, repo_id, passwd, salt, magic); } if (seafile_generate_random_key (passwd, enc_version, salt, random_key) < 0) { goto bad; } } int rc; if (passwd) { RepoCryptInfo *crypt_info = repo_crypt_info_new (magic, pwd_hash, algo, params); rc = create_repo_common (mgr, repo_id, repo_name, repo_desc, owner_email, random_key, salt, enc_version, crypt_info, error); g_free (crypt_info); } else rc = create_repo_common (mgr, repo_id, repo_name, repo_desc, owner_email, NULL, NULL, -1, NULL, error); if (rc < 0) goto bad; if (seaf_repo_manager_set_repo_owner (mgr, repo_id, owner_email) < 0) { seaf_warning ("Failed to set repo owner.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to set repo owner."); goto bad; } return repo_id; bad: if (repo_id) g_free (repo_id); return NULL; } char * seaf_repo_manager_create_enc_repo (SeafRepoManager *mgr, const char *repo_id, const char *repo_name, const char *repo_desc, const char *owner_email, const char *magic, const char *random_key, const char *salt, int enc_version, const char *pwd_hash, const char *pwd_hash_algo, const char *pwd_hash_params, GError **error) { if (!repo_id || !is_uuid_valid (repo_id)) { seaf_warning ("Invalid repo_id.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return NULL; } if (seaf_repo_manager_repo_exists (mgr, repo_id)) { seaf_warning ("Repo %s exists, refuse to create.\n", repo_id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Repo already exists"); return NULL; } RepoCryptInfo *crypt_info = repo_crypt_info_new (magic, pwd_hash, pwd_hash_algo, pwd_hash_params); if (create_repo_common (mgr, repo_id, repo_name, repo_desc, owner_email, random_key, salt, enc_version, crypt_info, error) < 0) { g_free (crypt_info); return NULL; } g_free (crypt_info); if (seaf_repo_manager_set_repo_owner 
(mgr, repo_id, owner_email) < 0) { seaf_warning ("Failed to set repo owner.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to set repo owner."); return NULL; } return g_strdup (repo_id); } static int reap_token (void *data) { SeafRepoManager *mgr = data; GHashTableIter iter; gpointer key, value; DecryptedToken *t; pthread_rwlock_wrlock (&mgr->priv->lock); gint64 now = (gint64)time(NULL); g_hash_table_iter_init (&iter, mgr->priv->decrypted_tokens); while (g_hash_table_iter_next (&iter, &key, &value)) { t = value; if (now >= t->reap_time) g_hash_table_iter_remove (&iter); } pthread_rwlock_unlock (&mgr->priv->lock); return TRUE; } static void decrypted_token_free (DecryptedToken *token) { if (!token) return; g_free (token->token); g_free (token); } void seaf_repo_manager_add_decrypted_token (SeafRepoManager *mgr, const char *encrypted_token, const char *session_key, const char *decrypted_token) { char key[256]; DecryptedToken *token; snprintf (key, sizeof(key), "%s%s", encrypted_token, session_key); key[255] = 0; pthread_rwlock_wrlock (&mgr->priv->lock); token = g_new0 (DecryptedToken, 1); token->token = g_strdup(decrypted_token); token->reap_time = (gint64)time(NULL) + DECRYPTED_TOKEN_TTL; g_hash_table_insert (mgr->priv->decrypted_tokens, g_strdup(key), token); pthread_rwlock_unlock (&mgr->priv->lock); } char * seaf_repo_manager_get_decrypted_token (SeafRepoManager *mgr, const char *encrypted_token, const char *session_key) { char key[256]; DecryptedToken *token; snprintf (key, sizeof(key), "%s%s", encrypted_token, session_key); key[255] = 0; pthread_rwlock_rdlock (&mgr->priv->lock); token = g_hash_table_lookup (mgr->priv->decrypted_tokens, key); pthread_rwlock_unlock (&mgr->priv->lock); if (token) return g_strdup(token->token); return NULL; } static gboolean get_shared_users (SeafDBRow *row, void *data) { GList **shared_users = data; const char *user = seaf_db_row_get_column_text (row, 0); const char *perm = seaf_db_row_get_column_text (row, 1); 
const char *repo_id = seaf_db_row_get_column_text (row, 2); SeafileSharedUser *uobj = g_object_new (SEAFILE_TYPE_SHARED_USER, "repo_id", repo_id, "user", user, "perm", perm, NULL); *shared_users = g_list_prepend (*shared_users, uobj); return TRUE; } GList * seaf_repo_manager_get_shared_users_for_subdir (SeafRepoManager *mgr, const char *repo_id, const char *path, const char *from_user, GError **error) { GList *shared_users = NULL; int ret = seaf_db_statement_foreach_row (mgr->seaf->db, "SELECT to_email, permission, v.repo_id " "FROM SharedRepo s, VirtualRepo v " "WHERE s.repo_id = v.repo_id AND v.origin_repo = ? " "AND v.path = ? AND s.from_email = ?", get_shared_users, &shared_users, 3, "string", repo_id, "string", path, "string", from_user); if (ret < 0) { seaf_warning ("Failed to get shared users for %.8s(%s).\n", repo_id, path); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to get shared users for subdir from db"); while (shared_users) { g_object_unref (shared_users->data); shared_users = g_list_delete_link (shared_users, shared_users); } return NULL; } return shared_users; } static gboolean get_shared_groups (SeafDBRow *row, void *data) { GList **shared_groups = data; int group = seaf_db_row_get_column_int (row, 0); const char *perm = seaf_db_row_get_column_text (row, 1); const char *repo_id = seaf_db_row_get_column_text (row, 2); SeafileSharedGroup *gobj = g_object_new (SEAFILE_TYPE_SHARED_GROUP, "repo_id", repo_id, "group_id", group, "perm", perm, NULL); *shared_groups = g_list_prepend (*shared_groups, gobj); return TRUE; } GList * seaf_repo_manager_get_shared_groups_for_subdir (SeafRepoManager *mgr, const char *repo_id, const char *path, const char *from_user, GError **error) { GList *shared_groups = NULL; int ret = seaf_db_statement_foreach_row (mgr->seaf->db, "SELECT group_id, permission, v.repo_id " "FROM RepoGroup r, VirtualRepo v " "WHERE r.repo_id = v.repo_id AND v.origin_repo = ? " "AND v.path = ? 
AND r.user_name = ?", get_shared_groups, &shared_groups, 3, "string", repo_id, "string", path, "string", from_user); if (ret < 0) { seaf_warning ("Failed to get shared groups for %.8s(%s).\n", repo_id, path); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to get shared groups fro subdir from db"); while (shared_groups) { g_object_unref (shared_groups->data); shared_groups = g_list_delete_link (shared_groups, shared_groups); } return NULL; } return shared_groups; } int seaf_repo_manager_edit_repo (const char *repo_id, const char *name, const char *description, const char *user, GError **error) { SeafRepo *repo = NULL; SeafCommit *commit = NULL, *parent = NULL; int ret = 0; if (!name && !description) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "At least one argument should be non-null"); return -1; } if (!is_uuid_valid (repo_id)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); return -1; } retry: repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "No such library"); return -1; } if (!name) name = repo->name; if (!description) description = repo->desc; /* * We only change repo_name or repo_desc, so just copy the head commit * and change these two fields. 
*/ parent = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id, repo->version, repo->head->commit_id); if (!parent) { seaf_warning ("Failed to get commit %s:%s.\n", repo->id, repo->head->commit_id); ret = -1; goto out; } if (!user) { user = parent->creator_name; } commit = seaf_commit_new (NULL, repo->id, parent->root_id, user, EMPTY_SHA1, "Changed library name or description", 0); commit->parent_id = g_strdup(parent->commit_id); seaf_repo_to_commit (repo, commit); g_free (commit->repo_name); commit->repo_name = g_strdup(name); g_free (commit->repo_desc); commit->repo_desc = g_strdup(description); if (seaf_commit_manager_add_commit (seaf->commit_mgr, commit) < 0) { ret = -1; goto out; } seaf_branch_set_commit (repo->head, commit->commit_id); if (seaf_branch_manager_test_and_update_branch (seaf->branch_mgr, repo->head, parent->commit_id, FALSE, NULL, NULL, NULL) < 0) { seaf_repo_unref (repo); seaf_commit_unref (commit); seaf_commit_unref (parent); repo = NULL; commit = NULL; parent = NULL; goto retry; } seaf_repo_manager_update_repo_info (seaf->repo_mgr, repo_id, repo->head->commit_id); out: seaf_commit_unref (commit); seaf_commit_unref (parent); seaf_repo_unref (repo); return ret; } gboolean get_total_file_number_cb (SeafDBRow *row, void *vdata) { gint64 *data = (gint64 *)vdata; gint64 count = seaf_db_row_get_column_int64 (row, 0); *data = count; return FALSE; } gint64 seaf_get_total_file_number (GError **error) { gint64 count = 0; int ret = seaf_db_statement_foreach_row (seaf->db, "SELECT SUM(file_count) FROM RepoFileCount f " "LEFT JOIN VirtualRepo v " "ON f.repo_id=v.repo_id," "Repo r " "WHERE v.repo_id IS NULL AND " "f.repo_id=r.repo_id", get_total_file_number_cb, &count, 0); if (ret < 0) { seaf_warning ("Failed to get total file number.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to get total file number from db."); return -1; } return count; } gboolean get_total_storage_cb(SeafDBRow *row, void *vdata) { gint64 *data = (gint64 
*)vdata; gint64 size = seaf_db_row_get_column_int64 (row, 0); *data = size; return FALSE; } gint64 seaf_get_total_storage (GError **error) { gint64 size = 0; int ret; if (seaf_db_type(seaf->db) == SEAF_DB_TYPE_PGSQL) { ret = seaf_db_statement_foreach_row (seaf->db, "SELECT SUM(\"size\") FROM RepoSize s " "LEFT JOIN VirtualRepo v " "ON s.repo_id=v.repo_id " "WHERE v.repo_id IS NULL", get_total_storage_cb, &size, 0); } else { ret = seaf_db_statement_foreach_row (seaf->db, "SELECT SUM(size) FROM RepoSize s " "LEFT JOIN VirtualRepo v " "ON s.repo_id=v.repo_id " "WHERE v.repo_id IS NULL", get_total_storage_cb, &size, 0); } if (ret < 0) { seaf_warning ("Failed to get total storage occupation.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to get total storage occupation from db."); return -1; } return size; } /* Online GC related */ char * seaf_repo_get_current_gc_id (SeafRepo *repo) { if (seaf_db_type(seaf->db) == SEAF_DB_TYPE_SQLITE) return NULL; char *sql = "SELECT gc_id FROM GCID WHERE repo_id = ?"; char *gc_id; if (!repo->virtual_info) gc_id = seaf_db_statement_get_string (seaf->db, sql, 1, "string", repo->id); else { gc_id = seaf_db_statement_get_string (seaf->db, sql, 1, "string", repo->store_id); } return gc_id; } char * seaf_repo_get_last_gc_id (SeafRepo *repo, const char *client_id) { if (seaf_db_type(seaf->db) == SEAF_DB_TYPE_SQLITE) return NULL; char *sql = "SELECT gc_id FROM LastGCID WHERE repo_id = ? AND client_id = ?"; char *gc_id; gc_id = seaf_db_statement_get_string (seaf->db, sql, 2, "string", repo->id, "string", client_id); return gc_id; } gboolean seaf_repo_has_last_gc_id (SeafRepo *repo, const char *client_id) { if (seaf_db_type(seaf->db) == SEAF_DB_TYPE_SQLITE) return FALSE; char *sql = "SELECT 1 FROM LastGCID WHERE repo_id = ? 
AND client_id = ?"; gboolean db_err; return seaf_db_statement_exists (seaf->db, sql, &db_err, 2, "string", repo->id, "string", client_id); } int seaf_repo_set_last_gc_id (SeafRepo *repo, const char *client_id, const char *gc_id) { if (seaf_db_type(seaf->db) == SEAF_DB_TYPE_SQLITE) return 0; gboolean id_exists, db_err = FALSE; char *sql; int ret = 0; sql = "SELECT 1 FROM LastGCID WHERE repo_id = ? AND client_id = ?"; id_exists = seaf_db_statement_exists (seaf->db, sql, &db_err, 2, "string", repo->id, "string", client_id); if (id_exists) { sql = "UPDATE LastGCID SET gc_id = ? WHERE repo_id = ? AND client_id = ?"; ret = seaf_db_statement_query (seaf->db, sql, 3, "string", gc_id, "string", repo->id, "string", client_id); } else { sql = "INSERT INTO LastGCID (repo_id, client_id, gc_id) VALUES (?, ?, ?)"; ret = seaf_db_statement_query (seaf->db, sql, 3, "string", repo->id, "string", client_id, "string", gc_id); } return ret; } int seaf_repo_remove_last_gc_id (SeafRepo *repo, const char *client_id) { if (seaf_db_type(seaf->db) == SEAF_DB_TYPE_SQLITE) return 0; char *sql = "DELETE FROM LastGCID WHERE repo_id = ? 
AND client_id = ?"; seaf_db_statement_query (seaf->db, sql, 2, "string", repo->id, "string", client_id); return 0; } int seaf_repo_manager_add_upload_tmp_file (SeafRepoManager *mgr, const char *repo_id, const char *file_path, const char *tmp_file, GError **error) { char *file_path_with_slash = NULL; if (file_path[0] == '/') { file_path_with_slash = g_strdup(file_path); } else { file_path_with_slash = g_strconcat("/", file_path, NULL); } int ret = seaf_db_statement_query (mgr->seaf->db, "INSERT INTO WebUploadTempFiles " "(repo_id, file_path, tmp_file_path) " "VALUES (?, ?, ?)", 3, "string", repo_id, "string", file_path_with_slash, "string", tmp_file); if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to add upload tmp file record to db."); } g_free (file_path_with_slash); return ret; } int seaf_repo_manager_del_upload_tmp_file (SeafRepoManager *mgr, const char *repo_id, const char *file_path, GError **error) { char *file_path_with_slash = NULL, *file_path_no_slash = NULL; /* Due to a bug in early versions of 7.0, some file_path may be stored in the db without * a leading slash. To be compatible with those records, we need to check the path * with and without leading slash. */ if (file_path[0] == '/') { file_path_with_slash = g_strdup(file_path); file_path_no_slash = g_strdup(file_path+1); } else { file_path_with_slash = g_strconcat("/", file_path, NULL); file_path_no_slash = g_strdup(file_path); } int ret = seaf_db_statement_query (mgr->seaf->db, "DELETE FROM WebUploadTempFiles WHERE " "repo_id = ? 
AND file_path IN (?, ?)", 3, "string", repo_id, "string", file_path_with_slash, "string", file_path_no_slash); if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to delete upload tmp file record from db."); } g_free (file_path_with_slash); g_free (file_path_no_slash); return ret; } static gboolean get_tmp_file_path (SeafDBRow *row, void *data) { char **path = data; *path = g_strdup (seaf_db_row_get_column_text (row, 0)); return FALSE; } char * seaf_repo_manager_get_upload_tmp_file (SeafRepoManager *mgr, const char *repo_id, const char *file_path, GError **error) { char *tmp_file_path = NULL; char *file_path_with_slash = NULL, *file_path_no_slash = NULL; /* Due to a bug in early versions of 7.0, some file_path may be stored in the db without * a leading slash. To be compatible with those records, we need to check the path * with and without leading slash. * The correct file_path in db should be with a leading slash. */ if (file_path[0] == '/') { file_path_with_slash = g_strdup(file_path); file_path_no_slash = g_strdup(file_path+1); } else { file_path_with_slash = g_strconcat("/", file_path, NULL); file_path_no_slash = g_strdup(file_path); } int ret = seaf_db_statement_foreach_row (mgr->seaf->db, "SELECT tmp_file_path FROM WebUploadTempFiles " "WHERE repo_id = ? AND file_path = ?", get_tmp_file_path, &tmp_file_path, 2, "string", repo_id, "string", file_path_with_slash); if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to get upload temp file path from db."); goto out; } if (!tmp_file_path) { /* Try file_path without slash. */ int ret = seaf_db_statement_foreach_row (mgr->seaf->db, "SELECT tmp_file_path FROM WebUploadTempFiles " "WHERE repo_id = ? 
AND file_path = ?", get_tmp_file_path, &tmp_file_path, 2, "string", repo_id, "string", file_path_no_slash); if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to get upload temp file path from db."); goto out; } } out: g_free (file_path_with_slash); g_free (file_path_no_slash); return tmp_file_path; } gint64 seaf_repo_manager_get_upload_tmp_file_offset (SeafRepoManager *mgr, const char *repo_id, const char *file_path, GError **error) { char *tmp_file_path = NULL; SeafStat file_stat; tmp_file_path = seaf_repo_manager_get_upload_tmp_file (mgr, repo_id, file_path, error); if (*error) { return -1; } if (!tmp_file_path) return 0; if (seaf_stat (tmp_file_path, &file_stat) < 0) { if (errno == ENOENT) { seaf_message ("Temp file %s doesn't exist, remove reocrd from db.\n", tmp_file_path); if (seaf_repo_manager_del_upload_tmp_file (mgr, repo_id, file_path, error) < 0) { g_free (tmp_file_path); return -1; } return 0; } seaf_warning ("Failed to stat temp file %s: %s.\n", tmp_file_path, strerror(errno)); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to stat temp file."); g_free (tmp_file_path); return -1; } g_free (tmp_file_path); return file_stat.st_size; } void seaf_repo_manager_update_repo_info (SeafRepoManager *mgr, const char *repo_id, const char *head_commit_id) { SeafCommit *head; head = seaf_commit_manager_get_commit (seaf->commit_mgr, repo_id, 1, head_commit_id); if (!head) { seaf_warning ("Failed to get commit %s:%s.\n", repo_id, head_commit_id); return; } set_repo_commit_to_db (repo_id, head->repo_name, head->ctime, head->version, (head->encrypted ? 
1 : 0), head->creator_name); seaf_commit_unref (head); } char * seaf_get_trash_repo_owner (const char *repo_id) { char *sql = "SELECT owner_id from RepoTrash WHERE repo_id = ?"; return seaf_db_statement_get_string(seaf->db, sql, 1, "string", repo_id); } GObject * seaf_get_group_shared_repo_by_path (SeafRepoManager *mgr, const char *repo_id, const char *path, int group_id, gboolean is_org, GError **error) { char *sql; char *real_repo_id = NULL; GList *repo = NULL; GObject *ret = NULL; /* If path is NULL, 'repo_id' represents for the repo we want, * otherwise, 'repo_id' represents for the origin repo, * find virtual repo by path first. */ if (path != NULL) { real_repo_id = seaf_repo_manager_get_virtual_repo_id (mgr, repo_id, path, NULL); if (!real_repo_id) { seaf_warning ("Failed to get virtual repo_id by path %s, origin_repo: %s\n", path, repo_id); return NULL; } } if (!real_repo_id) real_repo_id = g_strdup (repo_id); if (!is_org) sql = "SELECT RepoGroup.repo_id, v.repo_id, " "group_id, user_name, permission, commit_id, s.size, " "v.origin_repo, v.path, i.name, " "i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, i2.name " "FROM RepoGroup LEFT JOIN VirtualRepo v ON " "RepoGroup.repo_id = v.repo_id " "LEFT JOIN RepoInfo i ON RepoGroup.repo_id = i.repo_id " "LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id " "LEFT JOIN RepoSize s ON RepoGroup.repo_id = s.repo_id, " "Branch WHERE group_id = ? AND " "RepoGroup.repo_id = Branch.repo_id AND " "RepoGroup.repo_id = ? 
AND " "Branch.name = 'master'"; else sql = "SELECT OrgGroupRepo.repo_id, v.repo_id, " "group_id, owner, permission, commit_id, s.size, " "v.origin_repo, v.path, i.name, " "i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, i2.name " "FROM OrgGroupRepo LEFT JOIN VirtualRepo v ON " "OrgGroupRepo.repo_id = v.repo_id " "LEFT JOIN RepoInfo i ON OrgRepoGroup.repo_id = i.repo_id " "LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id " "LEFT JOIN RepoSize s ON OrgGroupRepo.repo_id = s.repo_id, " "Branch WHERE group_id = ? AND " "OrgGroupRepo.repo_id = Branch.repo_id AND " "OrgGroupRepo.repo_id = ? AND " "Branch.name = 'master'"; /* The list 'repo' should have only one repo, * use existing api get_group_repos_cb() to get it. */ if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_group_repos_cb, &repo, 2, "int", group_id, "string", real_repo_id) < 0) { g_free (real_repo_id); g_list_free (repo); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to get repo by group_id from db."); return NULL; } g_free (real_repo_id); if (repo) { seaf_fill_repo_obj_from_commit (&repo); if (repo) ret = (GObject *)(repo->data); g_list_free (repo); } return ret; } GList * seaf_get_group_repos_by_user (SeafRepoManager *mgr, const char *user, int org_id, GError **error) { CcnetGroup *group; GList *groups = NULL, *p, *q; GList *repos = NULL; SeafileRepo *repo = NULL; GString *sql = NULL; int group_id = 0; /* Get the groups this user belongs to. 
*/ groups = ccnet_group_manager_get_groups_by_user (seaf->group_mgr, user, 1, NULL); if (!groups) { goto out; } sql = g_string_new (""); g_string_printf (sql, "SELECT g.repo_id, v.repo_id, " "group_id, %s, permission, commit_id, s.size, " "v.origin_repo, v.path, i.name, " "i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, i2.name " "FROM %s g LEFT JOIN VirtualRepo v ON " "g.repo_id = v.repo_id " "LEFT JOIN RepoInfo i ON g.repo_id = i.repo_id " "LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id " "LEFT JOIN RepoSize s ON g.repo_id = s.repo_id, " "Branch b WHERE g.repo_id = b.repo_id AND " "b.name = 'master' AND group_id IN (", org_id < 0 ? "user_name" : "owner", org_id < 0 ? "RepoGroup" : "OrgGroupRepo"); for (p = groups; p != NULL; p = p->next) { group = p->data; g_object_get (group, "id", &group_id, NULL); g_string_append_printf (sql, "%d", group_id); if (p->next) g_string_append_printf (sql, ","); } g_string_append_printf (sql, " ) ORDER BY group_id"); if (seaf_db_statement_foreach_row (mgr->seaf->db, sql->str, get_group_repos_cb, &repos, 0) < 0) { for (p = repos; p; p = p->next) { g_object_unref (p->data); } g_list_free (repos); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to get user group repos from db."); seaf_warning ("Failed to get user[%s] group repos from db.\n", user); goto out; } int repo_group_id = 0; char *group_name = NULL; q = repos; /* Add group_name to repo. 
Both groups and repos are listed by group_id in descending order */ for (p = groups; p; p = p->next) { group = p->data; g_object_get (group, "id", &group_id, NULL); g_object_get (group, "group_name", &group_name, NULL); for (; q; q = q->next) { repo = q->data; g_object_get (repo, "group_id", &repo_group_id, NULL); if (repo_group_id == group_id) g_object_set (repo, "group_name", group_name, NULL); else break; } g_free (group_name); if (q == NULL) break; } seaf_fill_repo_obj_from_commit (&repos); out: if (sql) g_string_free (sql, TRUE); for (p = groups; p != NULL; p = p->next) g_object_unref ((GObject *)p->data); g_list_free (groups); return g_list_reverse (repos); } typedef struct RepoPath { char *repo_id; char *path; int group_id; } RepoPath; gboolean convert_repo_path_cb (SeafDBRow *row, void *data) { GList **repo_paths = data; const char *repo_id = seaf_db_row_get_column_text (row, 0); const char *path = seaf_db_row_get_column_text (row, 1); int group_id = seaf_db_row_get_column_int (row, 2); RepoPath *rp = g_new0(RepoPath, 1); rp->repo_id = g_strdup(repo_id); rp->path = g_strdup(path); rp->group_id = group_id; *repo_paths = g_list_append (*repo_paths, rp); return TRUE; } static void free_repo_path (gpointer data) { if (!data) return; RepoPath *rp = data; g_free (rp->repo_id); g_free (rp->path); g_free (rp); } static char * filter_path (GList *repo_paths, const char *path) { GList *ptr = NULL; int len; const char *relative_path; char *ret = NULL; RepoPath *rp = NULL, res; res.repo_id = NULL; res.path = NULL; res.group_id = 0; /* Find nearest item which contains @path, */ for (ptr = repo_paths; ptr; ptr = ptr->next) { rp = ptr->data; len = strlen(rp->path); if (strncmp(rp->path, path, len) == 0 && (path[len] == '/' || path[len] == '\0')) { if (g_strcmp0(rp->path, res.path) > 0) { res.path = rp->path; res.repo_id = rp->repo_id; res.group_id = rp->group_id; } } } if (res.repo_id && res.path) { relative_path = path + strlen(res.path); if (relative_path[0] == '\0') 
relative_path = "/"; json_t *json = json_object (); json_object_set_string_member(json, "repo_id", res.repo_id); json_object_set_string_member(json, "path", relative_path); if (res.group_id > 0) json_object_set_int_member(json, "group_id", res.group_id); ret = json_dumps (json, 0); json_decref (json); } return ret; } /* Convert origin repo and path to virtual repo and relative path */ char * seaf_repo_manager_convert_repo_path (SeafRepoManager *mgr, const char *repo_id, const char *path, const char *user, gboolean is_org, GError **error) { char *ret = NULL; int rc; int group_id; GString *sql; CcnetGroup *group; GList *groups = NULL, *p1; GList *repo_paths = NULL; SeafVirtRepo *vinfo = NULL; const char *r_repo_id = repo_id; char *r_path = NULL; vinfo = seaf_repo_manager_get_virtual_repo_info (mgr, repo_id); if (vinfo) { r_repo_id = vinfo->origin_repo_id; r_path = g_strconcat (vinfo->path, path, NULL); } else { r_path = g_strdup(path); } sql = g_string_new (""); g_string_printf (sql, "SELECT v.repo_id, path, 0 FROM VirtualRepo v, %s s WHERE " "v.origin_repo=? AND v.repo_id=s.repo_id AND s.to_email=?", is_org ? "OrgSharedRepo" : "SharedRepo"); rc = seaf_db_statement_foreach_row (seaf->db, sql->str, convert_repo_path_cb, &repo_paths, 2, "string", r_repo_id, "string", user); if (rc < 0) { seaf_warning("Failed to convert repo path [%s:%s] to virtual repo path, db_error.\n", repo_id, path); goto out; } ret = filter_path(repo_paths, r_path); g_list_free_full(repo_paths, free_repo_path); repo_paths = NULL; if (ret) goto out; /* Get the groups this user belongs to. */ groups = ccnet_group_manager_get_groups_by_user (seaf->group_mgr, user, 1, NULL); if (!groups) { goto out; } g_string_printf (sql, "SELECT v.repo_id, path, r.group_id FROM VirtualRepo v, %s r WHERE " "v.origin_repo=? AND v.repo_id=r.repo_id AND r.group_id IN(", is_org ? 
"OrgGroupRepo" : "RepoGroup"); for (p1 = groups; p1 != NULL; p1 = p1->next) { group = p1->data; g_object_get (group, "id", &group_id, NULL); g_string_append_printf (sql, "%d", group_id); if (p1->next) g_string_append_printf (sql, ","); } g_string_append_printf (sql, ")"); rc = seaf_db_statement_foreach_row (seaf->db, sql->str, convert_repo_path_cb, &repo_paths, 1, "string", r_repo_id); if (rc < 0) { seaf_warning("Failed to convert repo path [%s:%s] to virtual repo path, db error.\n", repo_id, path); g_string_free (sql, TRUE); goto out; } ret = filter_path(repo_paths, r_path); g_list_free_full(repo_paths, free_repo_path); out: g_free (r_path); if (vinfo) seaf_virtual_repo_info_free (vinfo); g_string_free (sql, TRUE); for (p1 = groups; p1 != NULL; p1 = p1->next) g_object_unref ((GObject *)p1->data); g_list_free (groups); return ret; } int seaf_repo_manager_set_repo_status(SeafRepoManager *mgr, const char *repo_id, RepoStatus status) { int ret = 0; if (seaf_db_statement_query (mgr->seaf->db, "UPDATE RepoInfo SET status=? " "WHERE repo_id=? OR repo_id IN " "(SELECT repo_id FROM VirtualRepo WHERE origin_repo=?)", 3, "int", status, "string", repo_id, "string", repo_id) < 0) ret = -1; return ret; } int seaf_repo_manager_get_repo_status(SeafRepoManager *mgr, const char *repo_id) { // First, check origin repo's status char *sql = "SELECT i.status FROM VirtualRepo v LEFT JOIN RepoInfo i " "ON i.repo_id=v.origin_repo WHERE v.repo_id=? 
" "AND i.repo_id IS NOT NULL"; int status = seaf_db_statement_get_int (mgr->seaf->db, sql, 1, "string", repo_id); if (status >= 0) { return status; } // Then check repo's own status sql = "SELECT status FROM RepoInfo WHERE repo_id=?"; status = seaf_db_statement_get_int (mgr->seaf->db, sql, 1, "string", repo_id); return status; } ================================================ FILE: server/repo-mgr.h ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef SEAF_REPO_MGR_H #define SEAF_REPO_MGR_H #include "seafile-object.h" #include "commit-mgr.h" #include "branch-mgr.h" typedef enum RepoStatus { REPO_STATUS_NORMAL, REPO_STATUS_READ_ONLY, N_REPO_STATUS, } RepoStatus; struct _SeafRepoManager; typedef struct _SeafRepo SeafRepo; typedef struct SeafVirtRepo { char repo_id[37]; char origin_repo_id[37]; char *path; char base_commit[41]; } SeafVirtRepo; struct _SeafRepo { struct _SeafRepoManager *manager; gchar id[37]; gchar *name; gchar *desc; gchar *last_modifier; gboolean encrypted; int enc_version; gchar magic[65]; /* hash(repo_id + passwd), key stretched. */ gchar pwd_hash[65]; /* hash(repo_id + passwd), key stretched. */ gchar *pwd_hash_algo; gchar *pwd_hash_params; gchar random_key[97]; gchar salt[65]; gboolean no_local_history; gint64 last_modify; gint64 size; gint64 file_count; gchar *type; int status; SeafBranch *head; gchar root_id[41]; gboolean is_corrupted; gboolean repaired; int ref_cnt; SeafVirtRepo *virtual_info; int version; /* Used to access fs and block sotre. * This id is different from repo_id when this repo is virtual. * Virtual repos share fs and block store with its origin repo. * However, commit store for each repo is always independent. * So always use repo_id to access commit store. 
*/ gchar store_id[37]; }; gboolean is_repo_id_valid (const char *id); SeafRepo* seaf_repo_new (const char *id, const char *name, const char *desc); void seaf_repo_free (SeafRepo *repo); void seaf_repo_ref (SeafRepo *repo); void seaf_repo_unref (SeafRepo *repo); int seaf_repo_set_head (SeafRepo *repo, SeafBranch *branch); /* Update repo name, desc, magic etc from commit. */ void seaf_repo_from_commit (SeafRepo *repo, SeafCommit *commit); void seaf_fill_repo_obj_from_commit (GList **repos); /* Update repo-related fields to commit. */ void seaf_repo_to_commit (SeafRepo *repo, SeafCommit *commit); /* * Returns a list of all commits belongs to the repo. * The commits in the repos are all unique. */ GList * seaf_repo_get_commits (SeafRepo *repo); GList * seaf_repo_diff (SeafRepo *repo, const char *arg1, const char *arg2, int fold_dir_results, char **error); typedef struct _SeafRepoManager SeafRepoManager; typedef struct _SeafRepoManagerPriv SeafRepoManagerPriv; struct _SeafRepoManager { struct _SeafileSession *seaf; SeafRepoManagerPriv *priv; }; SeafRepoManager* seaf_repo_manager_new (struct _SeafileSession *seaf); int seaf_repo_manager_init (SeafRepoManager *mgr); int seaf_repo_manager_start (SeafRepoManager *mgr); /* * Repo Management functions. */ int seaf_repo_manager_add_repo (SeafRepoManager *mgr, SeafRepo *repo); int seaf_repo_manager_del_repo (SeafRepoManager *mgr, const char *repo_id, GError **error); int seaf_repo_manager_del_virtual_repo (SeafRepoManager *mgr, const char *repo_id); SeafRepo* seaf_repo_manager_get_repo (SeafRepoManager *manager, const gchar *id); /* Return repo object even if it's corrupted. 
*/ SeafRepo* seaf_repo_manager_get_repo_ex (SeafRepoManager *manager, const gchar *id); gboolean seaf_repo_manager_repo_exists (SeafRepoManager *manager, const gchar *id); GList* seaf_repo_manager_get_repo_list (SeafRepoManager *mgr, int start, int limit, const gchar *order_by, int ret_virt_repo); gint64 seaf_repo_manager_count_repos (SeafRepoManager *mgr, GError **error); GList* seaf_repo_manager_get_trash_repo_list (SeafRepoManager *mgr, int start, int limit, GError **error); GList * seaf_repo_manager_get_trash_repos_by_owner (SeafRepoManager *mgr, const char *owner, GError **error); int seaf_repo_manager_del_repo_from_trash (SeafRepoManager *mgr, const char *repo_id, GError **error); /* Remove all entries in the repo trash. */ int seaf_repo_manager_empty_repo_trash (SeafRepoManager *mgr, GError **error); int seaf_repo_manager_empty_repo_trash_by_owner (SeafRepoManager *mgr, const char *owner, GError **error); int seaf_repo_manager_restore_repo_from_trash (SeafRepoManager *mgr, const char *repo_id, GError **error); GList * seaf_repo_manager_get_repo_id_list (SeafRepoManager *mgr); int seaf_repo_manager_branch_repo_unmap (SeafRepoManager *manager, SeafBranch *branch); /* * Repo properties functions. 
*/ #define MAX_REPO_TOKEN 64 #define DEFAULT_REPO_TOKEN "default" char * seaf_repo_manager_get_email_by_token (SeafRepoManager *manager, const char *repo_id, const char *token); char * seaf_repo_manager_generate_repo_token (SeafRepoManager *mgr, const char *repo_id, const char *email, GError **error); int seaf_repo_manager_add_token_peer_info (SeafRepoManager *mgr, const char *token, const char *peer_id, const char *peer_ip, const char *peer_name, gint64 sync_time, const char *client_ver); int seaf_repo_manager_update_token_peer_info (SeafRepoManager *mgr, const char *token, const char *peer_ip, gint64 sync_time, const char *client_ver); gboolean seaf_repo_manager_token_peer_info_exists (SeafRepoManager *mgr, const char *token); int seaf_repo_manager_delete_token (SeafRepoManager *mgr, const char *repo_id, const char *token, const char *user, GError **error); GList * seaf_repo_manager_list_repo_tokens (SeafRepoManager *mgr, const char *repo_id, GError **error); GList * seaf_repo_manager_list_repo_tokens_by_email (SeafRepoManager *mgr, const char *email, GError **error); int seaf_repo_manager_delete_repo_tokens_by_peer_id (SeafRepoManager *mgr, const char *email, const char *peer_id, GList **tokens, GError **error); int seaf_repo_manager_delete_repo_tokens_by_email (SeafRepoManager *mgr, const char *email, GError **error); gint64 seaf_repo_manager_get_repo_size (SeafRepoManager *mgr, const char *repo_id); int seaf_repo_manager_set_repo_history_limit (SeafRepoManager *mgr, const char *repo_id, int days); /* * > 0: keep a period of history; * = 0: don't keep history; * < 0: keep full history. */ int seaf_repo_manager_get_repo_history_limit (SeafRepoManager *mgr, const char *repo_id); int seaf_repo_manager_set_repo_valid_since (SeafRepoManager *mgr, const char *repo_id, gint64 timestamp); gint64 seaf_repo_manager_get_repo_valid_since (SeafRepoManager *mgr, const char *repo_id); /* * Return the timestamp to stop traversing history. 
* Returns > 0 if traverse a period of history; * Returns = 0 if only traverse the head commit; * Returns < 0 if traverse full history. */ gint64 seaf_repo_manager_get_repo_truncate_time (SeafRepoManager *mgr, const char *repo_id); /* * Repo Operations. */ int seaf_repo_manager_revert_on_server (SeafRepoManager *mgr, const char *repo_id, const char *commit_id, const char *user_name, GError **error); /** * Add a new file in a repo. * The content of the file is stored in a temporary file. * @repo_id: id of the repo * @temp_file_path: path of the temporary file * @parent_dir: the directory to add this file * @file_name: the name of the new file * @user: author of this operation */ int seaf_repo_manager_post_file (SeafRepoManager *mgr, const char *repo_id, const char *temp_file_path, const char *parent_dir, const char *file_name, const char *user, GError **error); int seaf_repo_manager_post_multi_files (SeafRepoManager *mgr, const char *repo_id, const char *parent_dir, const char *filenames_json, const char *paths_json, const char *user, int replace_existed, gint64 mtime, char **new_ids, char **task_id, GError **error); /* int */ /* seaf_repo_manager_post_file_blocks (SeafRepoManager *mgr, */ /* const char *repo_id, */ /* const char *parent_dir, */ /* const char *file_name, */ /* const char *blockids_json, */ /* const char *paths_json, */ /* const char *user, */ /* gint64 file_size, */ /* int replace_existed, */ /* char **new_id, */ /* GError **error); */ int seaf_repo_manager_post_blocks (SeafRepoManager *mgr, const char *repo_id, const char *blockids_json, const char *paths_json, const char *user, GError **error); int seaf_repo_manager_commit_file_blocks (SeafRepoManager *mgr, const char *repo_id, const char *parent_dir, const char *file_name, const char *blockids_json, const char *user, gint64 file_size, int replace_existed, gint64 mtime, char **new_id, GError **error); int seaf_repo_manager_post_empty_file (SeafRepoManager *mgr, const char *repo_id, const char 
*parent_dir, const char *new_file_name, const char *user, GError **error); int seaf_repo_manager_post_dir (SeafRepoManager *mgr, const char *repo_id, const char *parent_dir, const char *new_dir_name, const char *user, GError **error); int seaf_repo_manager_mkdir_with_parents (SeafRepoManager *mgr, const char *repo_id, const char *parent_dir, const char *new_dir_path, const char *user, GError **error); /** * Update an existing file in a repo * @params: same as seaf_repo_manager_post_file * @head_id: the commit id for the original file version. * It's optional. If it's NULL, the current repo head will be used. * @new_file_id: The return location of the new file id */ int seaf_repo_manager_put_file (SeafRepoManager *mgr, const char *repo_id, const char *temp_file_path, const char *parent_dir, const char *file_name, const char *user, const char *head_id, gint64 mtime, char **new_file_id, GError **error); /* int */ /* seaf_repo_manager_put_file_blocks (SeafRepoManager *mgr, */ /* const char *repo_id, */ /* const char *parent_dir, */ /* const char *file_name, */ /* const char *blockids_json, */ /* const char *paths_json, */ /* const char *user, */ /* const char *head_id, */ /* gint64 file_size, */ /* char **new_file_id, */ /* GError **error); */ int seaf_repo_manager_del_file (SeafRepoManager *mgr, const char *repo_id, const char *parent_dir, const char *file_name, const char *user, GError **error); int seaf_repo_manager_batch_del_files (SeafRepoManager *mgr, const char *repo_id, const char *file_list, const char *user, GError **error); SeafileCopyResult * seaf_repo_manager_copy_file (SeafRepoManager *mgr, const char *src_repo_id, const char *src_dir, const char *src_filename, const char *dst_repo_id, const char *dst_dir, const char *dst_filename, const char *user, int need_progress, int synchronous, GError **error); SeafileCopyResult * seaf_repo_manager_copy_multiple_files (SeafRepoManager *mgr, const char *src_repo_id, const char *src_dir, const char *src_filenames, 
const char *dst_repo_id, const char *dst_dir, const char *dst_filenames, const char *user, int need_progress, int synchronous, GError **error); SeafileCopyResult * seaf_repo_manager_move_file (SeafRepoManager *mgr, const char *src_repo_id, const char *src_dir, const char *src_filename, const char *dst_repo_id, const char *dst_dir, const char *dst_filename, int replace, const char *user, int need_progress, int synchronous, GError **error); SeafileCopyResult * seaf_repo_manager_move_multiple_files (SeafRepoManager *mgr, const char *src_repo_id, const char *src_dir, const char *src_filenames, const char *dst_repo_id, const char *dst_dir, const char *dst_filenames, int replace, const char *user, int need_progress, int synchronous, GError **error); int seaf_repo_manager_rename_file (SeafRepoManager *mgr, const char *repo_id, const char *parent_dir, const char *oldname, const char *newname, const char *user, GError **error); int seaf_repo_manager_is_valid_filename (SeafRepoManager *mgr, const char *repo_id, const char *filename, GError **error); char * seaf_repo_manager_create_new_repo (SeafRepoManager *mgr, const char *repo_name, const char *repo_desc, const char *owner_email, const char *passwd, int enc_version, const char *pwd_hash_algo, const char *pwd_hash_params, GError **error); char * seaf_repo_manager_create_enc_repo (SeafRepoManager *mgr, const char *repo_id, const char *repo_name, const char *repo_desc, const char *owner_email, const char *magic, const char *random_key, const char *salt, int enc_version, const char *pwd_hash, const char *pwd_hash_algo, const char *pwd_hash_params, GError **error); /* Give a repo and a path in this repo, returns a list of commits, where every * commit contains a unique version of the file. The commits are sorted in * ascending order of commit time. 
*/ GList * seaf_repo_manager_list_file_revisions (SeafRepoManager *mgr, const char *repo_id, const char *start_commit_id, const char *path, int limit, gboolean got_latest, gboolean got_second, GError **error); GList * seaf_repo_manager_calc_files_last_modified (SeafRepoManager *mgr, const char *repo_id, const char *parent_dir, int limit, GError **error); int seaf_repo_manager_revert_file (SeafRepoManager *mgr, const char *repo_id, const char *commit_id, const char *path, const char *user, GError **error); int seaf_repo_manager_revert_dir (SeafRepoManager *mgr, const char *repo_id, const char *old_commit_id, const char *dir_path, const char *user, GError **error); /* * Return deleted files/dirs. */ GList * seaf_repo_manager_get_deleted_entries (SeafRepoManager *mgr, const char *repo_id, int show_days, const char *path, const char *scan_stat, int limit, GError **error); /* * Set the dir_id of @dir_path to @new_dir_id. * @new_commit_id: The new head commit id after the update. */ int seaf_repo_manager_update_dir (SeafRepoManager *mgr, const char *repo_id, const char *dir_path, const char *new_dir_id, const char *user, const char *head_id, char *new_commit_id, GError **error); /* * Permission related functions. */ /* Owner functions. */ int seaf_repo_manager_set_repo_owner (SeafRepoManager *mgr, const char *repo_id, const char *email); char * seaf_repo_manager_get_repo_owner (SeafRepoManager *mgr, const char *repo_id); GList * seaf_repo_manager_get_orphan_repo_list (SeafRepoManager *mgr); /* TODO: add start and limit. */ /* Get repos owned by this user. 
*/ GList * seaf_repo_manager_get_repos_by_owner (SeafRepoManager *mgr, const char *email, int ret_corrupted, int start, int limit, gboolean *db_err); GList * seaf_repo_manager_get_repos_by_id_prefix (SeafRepoManager *mgr, const char *id_prefix, int start, int limit); GList * seaf_repo_manager_search_repos_by_name (SeafRepoManager *mgr, const char *name); GList * seaf_repo_manager_get_repo_ids_by_owner (SeafRepoManager *mgr, const char *email); /* Group repos. */ int seaf_repo_manager_add_group_repo (SeafRepoManager *mgr, const char *repo_id, int group_id, const char *owner, const char *permission, GError **error); int seaf_repo_manager_del_group_repo (SeafRepoManager *mgr, const char *repo_id, int group_id, GError **error); GList * seaf_repo_manager_get_groups_by_repo (SeafRepoManager *mgr, const char *repo_id, GError **error); typedef struct GroupPerm { int group_id; char permission[16]; } GroupPerm; GList * seaf_repo_manager_get_group_perm_by_repo (SeafRepoManager *mgr, const char *repo_id, GError **error); int seaf_repo_manager_set_group_repo_perm (SeafRepoManager *mgr, const char *repo_id, int group_id, const char *permission, GError **error); char * seaf_repo_manager_get_group_repo_owner (SeafRepoManager *mgr, const char *repo_id, GError **error); GList * seaf_repo_manager_get_group_repoids (SeafRepoManager *mgr, int group_id, GError **error); GList * seaf_repo_manager_get_repos_by_group (SeafRepoManager *mgr, int group_id, GError **error); GList * seaf_repo_manager_get_group_repos_by_owner (SeafRepoManager *mgr, const char *owner, GError **error); int seaf_repo_manager_remove_group_repos (SeafRepoManager *mgr, int group_id, const char *owner, GError **error); /* Inner public repos */ int seaf_repo_manager_set_inner_pub_repo (SeafRepoManager *mgr, const char *repo_id, const char *permission); int seaf_repo_manager_unset_inner_pub_repo (SeafRepoManager *mgr, const char *repo_id); gboolean seaf_repo_manager_is_inner_pub_repo (SeafRepoManager *mgr, const char 
*repo_id); GList * seaf_repo_manager_list_inner_pub_repos (SeafRepoManager *mgr, gboolean *db_err); gint64 seaf_repo_manager_count_inner_pub_repos (SeafRepoManager *mgr); GList * seaf_repo_manager_list_inner_pub_repos_by_owner (SeafRepoManager *mgr, const char *user); char * seaf_repo_manager_get_inner_pub_repo_perm (SeafRepoManager *mgr, const char *repo_id); /* * Comprehensive repo permission checker. * It checks if @user have permission to access @repo_id. */ char * seaf_repo_manager_check_permission (SeafRepoManager *mgr, const char *repo_id, const char *user, GError **error); GList * seaf_repo_manager_list_dir_with_perm (SeafRepoManager *mgr, const char *repo_id, const char *dir_path, const char *dir_id, const char *user, int offset, int limit, GError **error); /* Web access permission. */ int seaf_repo_manager_set_access_property (SeafRepoManager *mgr, const char *repo_id, const char *ap); char * seaf_repo_manager_query_access_property (SeafRepoManager *mgr, const char *repo_id); /* Decrypted repo token cache. */ void seaf_repo_manager_add_decrypted_token (SeafRepoManager *mgr, const char *encrypted_token, const char *session_key, const char *decrypted_token); char * seaf_repo_manager_get_decrypted_token (SeafRepoManager *mgr, const char *encrypted_token, const char *session_key); /* Virtual repo related. 
*/ char * seaf_repo_manager_create_virtual_repo (SeafRepoManager *mgr, const char *origin_repo_id, const char *path, const char *repo_name, const char *repo_desc, const char *owner, const char *passwd, GError **error); SeafVirtRepo * seaf_repo_manager_get_virtual_repo_info (SeafRepoManager *mgr, const char *repo_id); GList * seaf_repo_manager_get_virtual_info_by_origin (SeafRepoManager *mgr, const char *origin_repo); void seaf_virtual_repo_info_free (SeafVirtRepo *vinfo); gboolean seaf_repo_manager_is_virtual_repo (SeafRepoManager *mgr, const char *repo_id); char * seaf_repo_manager_get_virtual_repo_id (SeafRepoManager *mgr, const char *origin_repo, const char *path, const char *owner); GList * seaf_repo_manager_get_virtual_repos_by_owner (SeafRepoManager *mgr, const char *owner, GError **error); GList * seaf_repo_manager_get_virtual_repo_ids_by_origin (SeafRepoManager *mgr, const char *origin_repo); /* * if @repo_id is a virtual repo, try to merge with origin; * if not, try to merge with its virtual repos. */ int seaf_repo_manager_merge_virtual_repo (SeafRepoManager *mgr, const char *repo_id, const char *exclude_repo); /* * Check each virtual repo of @origin_repo_id, if the path corresponds to it * doesn't exist, delete the virtual repo. 
*/ void seaf_repo_manager_cleanup_virtual_repos (SeafRepoManager *mgr, const char *origin_repo_id); int seaf_repo_manager_init_merge_scheduler (); GList * seaf_repo_manager_get_shared_users_for_subdir (SeafRepoManager *mgr, const char *repo_id, const char *path, const char *from_user, GError **error); GList * seaf_repo_manager_get_shared_groups_for_subdir (SeafRepoManager *mgr, const char *repo_id, const char *path, const char *from_user, GError **error); int seaf_repo_manager_edit_repo (const char *repo_id, const char *name, const char *description, const char *user, GError **error); gint64 seaf_get_total_file_number (GError **error); gint64 seaf_get_total_storage (GError **error); /* Online GC related */ char * seaf_repo_get_current_gc_id (SeafRepo *repo); char * seaf_repo_get_last_gc_id (SeafRepo *repo, const char *client_id); gboolean seaf_repo_has_last_gc_id (SeafRepo *repo, const char *client_id); int seaf_repo_set_last_gc_id (SeafRepo *repo, const char *client_id, const char *gc_id); int seaf_repo_remove_last_gc_id (SeafRepo *repo, const char *client_id); int seaf_repo_manager_add_upload_tmp_file (SeafRepoManager *mgr, const char *repo_id, const char *file_path, const char *tmp_file, GError **error); int seaf_repo_manager_del_upload_tmp_file (SeafRepoManager *mgr, const char *repo_id, const char *file_path, GError **error); char * seaf_repo_manager_get_upload_tmp_file (SeafRepoManager *mgr, const char *repo_id, const char *file_path, GError **error); gint64 seaf_repo_manager_get_upload_tmp_file_offset (SeafRepoManager *mgr, const char *repo_id, const char *file_path, GError **error); void seaf_repo_manager_update_repo_info (SeafRepoManager *mgr, const char *repo_id, const char *head_commit_id); int set_repo_commit_to_db (const char *repo_id, const char *repo_name, gint64 update_time, int version, gboolean is_encrypted, const char *last_modifier); char * seaf_get_trash_repo_owner (const char *repo_id); GObject * seaf_get_group_shared_repo_by_path 
(SeafRepoManager *mgr, const char *repo_id, const char *path, int group_id, gboolean is_org, GError **error); GList * seaf_get_group_repos_by_user (SeafRepoManager *mgr, const char *user, int org_id, GError **error); int seaf_repo_manager_set_subdir_group_perm_by_path (SeafRepoManager *mgr, const char *repo_id, const char *username, int group_id, const char *permission, const char *path); int post_files_and_gen_commit (GList *filenames, const char *repo_id, const char *user, char **ret_json, int replace_existed, const char *canon_path, GList *id_list, GList *size_list, gint64 mtime, char *last_gc_id, GError **error); char * seaf_repo_manager_convert_repo_path (SeafRepoManager *mgr, const char *repo_id, const char *path, const char *user, gboolean is_org, GError **error); int seaf_repo_manager_set_repo_status(SeafRepoManager *mgr, const char *repo_id, RepoStatus status); int seaf_repo_manager_get_repo_status(SeafRepoManager *mgr, const char *repo_id); int seaf_repo_manager_repair_virtual_repo (char *repo_id); #endif ================================================ FILE: server/repo-op.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include "common.h" #include #include #include #include "utils.h" #define DEBUG_FLAG SEAFILE_DEBUG_OTHER #include "log.h" #include "seafile-object.h" #include "seafile-session.h" #include "commit-mgr.h" #include "branch-mgr.h" #include "repo-mgr.h" #include "fs-mgr.h" #include "seafile-error.h" #include "seafile-crypt.h" #include "diff-simple.h" #include "merge-new.h" #include "change-set.h" #include "seaf-utils.h" #include "seaf-db.h" #define INDEX_DIR "index" #define PREFIX_DEL_FILE "Deleted \"" #define PREFIX_DEL_DIR "Removed directory \"" #define PREFIX_DEL_DIRS "Removed \"" #define DUPLICATE_NAMES_COUNT 1000 gboolean should_ignore_file(const char *filename, void *data); static gboolean is_virtual_repo_and_origin (SeafRepo *repo1, SeafRepo *repo2); static 
gboolean check_file_count_and_size (SeafRepo *repo, SeafDirent *dent, gint64 total_files, gint64 *total_size_all, char **err_str); int post_files_and_gen_commit (GList *filenames, const char *repo_id, const char *user, char **ret_json, int replace_existed, const char *canon_path, GList *id_list, GList *size_list, gint64 mtime, char *last_gc_id, GError **error); /* * Repo operations. */ static gint compare_dirents (gconstpointer a, gconstpointer b) { const SeafDirent *ent_a = a, *ent_b = b; return strcmp (ent_b->name, ent_a->name); } static inline GList * dup_seafdir_entries (const GList *entries) { const GList *p; GList *newentries = NULL; SeafDirent *dent; for (p = entries; p; p = p->next) { dent = p->data; newentries = g_list_prepend (newentries, seaf_dirent_dup(dent)); } return g_list_reverse(newentries); } static gboolean filename_exists (GList *entries, const char *filename) { GList *ptr; SeafDirent *dent; for (ptr = entries; ptr != NULL; ptr = ptr->next) { dent = ptr->data; if (strcmp (dent->name, filename) == 0) return TRUE; } return FALSE; } static char * generate_unique_filename (const char *file, GList *entries) { int i = 1; char *name, *ext, *unique_name; unique_name = g_strdup(file); split_filename (unique_name, &name, &ext); while (filename_exists (entries, unique_name) && i <= DUPLICATE_NAMES_COUNT) { g_free (unique_name); if (ext) unique_name = g_strdup_printf ("%s (%d).%s", name, i, ext); else unique_name = g_strdup_printf ("%s (%d)", name, i); i++; } g_free (name); g_free (ext); if (i <= DUPLICATE_NAMES_COUNT) return unique_name; else { g_free (unique_name); return NULL; } } /* We need to call this function recursively because every dirs in canon_path * need to be updated. 
 */
/*
 * Walk down from directory object @dir_id along the relative path @to_path
 * and add @newdent to the target directory, then rewrite and re-save every
 * directory object on the path bottom-up.
 *
 * @to_path: relative path; "" means @dir_id itself is the target dir.
 * @replace_existed: non-zero => an entry with the same name is replaced
 *   in place; zero => a unique " (N)"-suffixed name is generated instead.
 *
 * Returns the id of the new version of @dir_id (caller frees), or NULL on
 * failure (directory missing, unique-name generation exhausted, or a
 * seaf_dir_save error).
 */
static char *
post_file_recursive (SeafRepo *repo,
                     const char *dir_id,
                     const char *to_path,
                     int replace_existed,
                     SeafDirent *newdent)
{
    SeafDir *olddir, *newdir;
    SeafDirent *dent;
    GList *ptr;
    char *slash;
    char *to_path_dup = NULL;
    char *remain = NULL;
    char *id = NULL;
    char *ret = NULL;

    olddir = seaf_fs_manager_get_seafdir_sorted(seaf->fs_mgr,
                                                repo->store_id, repo->version,
                                                dir_id);
    if (!olddir)
        return NULL;

    /* we reach the target dir. new dir entry is added */
    if (*to_path == '\0') {
        GList *newentries = NULL;
        char *unique_name;
        SeafDirent *dent_dup;

        if (replace_existed && filename_exists(olddir->entries, newdent->name)) {
            /* Rebuild the entry list with the matching entry swapped for
             * @newdent; all other entries are deep-copied unchanged. */
            GList *p;
            SeafDirent *dent;

            for (p = olddir->entries; p; p = p->next) {
                dent = p->data;
                if (strcmp(dent->name, newdent->name) == 0) {
                    newentries = g_list_prepend (newentries,
                                                 seaf_dirent_dup(newdent));
                } else {
                    newentries = g_list_prepend (newentries,
                                                 seaf_dirent_dup(dent));
                }
            }
            newentries = g_list_reverse (newentries);

            newdir = seaf_dir_new (NULL, newentries,
                                   dir_version_from_repo_version(repo->version));
            if (seaf_dir_save (seaf->fs_mgr,
                               repo->store_id, repo->version, newdir) == 0) {
                ret = g_strdup (newdir->dir_id);
            }
            seaf_dir_free (newdir);
            goto out;
        }

        /* Not replacing: dedup the name, then insert a copy of @newdent
         * under the (possibly suffixed) unique name. */
        unique_name = generate_unique_filename (newdent->name, olddir->entries);
        if (!unique_name)
            goto out;

        dent_dup = seaf_dirent_new (newdent->version,
                                    newdent->id, newdent->mode, unique_name,
                                    newdent->mtime, newdent->modifier,
                                    newdent->size);
        g_free (unique_name);

        newentries = dup_seafdir_entries (olddir->entries);
        newentries = g_list_insert_sorted (newentries, dent_dup,
                                           compare_dirents);

        newdir = seaf_dir_new (NULL, newentries,
                               dir_version_from_repo_version(repo->version));
        if (seaf_dir_save (seaf->fs_mgr,
                           repo->store_id, repo->version, newdir) == 0)
            ret = g_strdup (newdir->dir_id);
        seaf_dir_free (newdir);
        goto out;
    }

    /* Split @to_path into its first component and the remainder. */
    to_path_dup = g_strdup (to_path);
    slash = strchr (to_path_dup, '/');
    if (!slash) {
        /* Last component: remainder is the empty string. */
        remain = to_path_dup + strlen(to_path_dup);
    } else {
        *slash = '\0';
        remain = slash + 1;
    }

    for (ptr = olddir->entries; ptr; ptr = ptr->next) {
        dent = (SeafDirent *)ptr->data;

        if (strcmp(dent->name, to_path_dup) != 0)
            continue;

        /* Recurse into the matching subdirectory and, on success, point
         * this dirent at the rewritten child (ids are 40 hex chars). */
        id = post_file_recursive (repo, dent->id, remain,
                                  replace_existed, newdent);
        if (id != NULL) {
            memcpy(dent->id, id, 40);
            dent->id[40] = '\0';
            if (repo->version > 0)
                dent->mtime = (guint64)time(NULL);
        }
        break;
    }

    if (id != NULL) {
        /* Create a new SeafDir. */
        GList *new_entries;

        new_entries = dup_seafdir_entries (olddir->entries);
        newdir = seaf_dir_new (NULL, new_entries,
                               dir_version_from_repo_version(repo->version));
        if (seaf_dir_save (seaf->fs_mgr,
                           repo->store_id, repo->version, newdir) == 0)
            ret = g_strndup (newdir->dir_id, 40);
        seaf_dir_free (newdir);
    }

out:
    g_free (to_path_dup);
    g_free (id);
    seaf_dir_free(olddir);
    return ret;
}

/* Wrapper over post_file_recursive(): normalizes @parent_dir to a relative
 * path (the recursion expects no leading '/'). */
static char *
do_post_file_replace (SeafRepo *repo,
                      const char *root_id,
                      const char *parent_dir,
                      int replace_existed,
                      SeafDirent *dent)
{
    /* if parent_dir is a absolutely path, we will remove the first '/' */
    if (*parent_dir == '/')
        parent_dir = parent_dir + 1;

    return post_file_recursive(repo, root_id, parent_dir,
                               replace_existed, dent);
}

/* do_post_file_replace() with replace_existed = 0 (always dedup names). */
static char *
do_post_file (SeafRepo *repo,
              const char *root_id,
              const char *parent_dir,
              SeafDirent *dent)
{
    return do_post_file_replace(repo, root_id, parent_dir, 0, dent);
}

/* Canonicalize @path: convert backslashes to '/' and strip trailing
 * slashes.  Returns a newly allocated string owned by the caller.
 * Note: a path of all slashes (e.g. "/") canonicalizes to "". */
static char *
get_canonical_path (const char *path)
{
    char *ret = g_strdup (path);
    char *p;

    for (p = ret; *p != 0; ++p) {
        if (*p == '\\')
            *p = '/';
    }

    /* Remove trailing slashes from dir path. */
    int len = strlen(ret);
    int i = len - 1;
    while (i >= 0 && ret[i] == '/')
        ret[i--] = 0;

    return ret;
}

/* Return TRUE if @filename already existing in @parent_dir. If exists, and @mode is not NULL, set its value to the mode of the dirent.
 */
static gboolean
check_file_exists (const char *store_id,
                   int repo_version,
                   const char *root_id,
                   const char *parent_dir,
                   const char *filename,
                   int *mode)
{
    SeafDir *dir;
    GList *p;
    SeafDirent *dent;
    int ret = FALSE;

    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,
                                               store_id, repo_version,
                                               root_id,
                                               parent_dir, NULL);
    if (!dir) {
        /* Also returns FALSE when the parent dir itself can't be loaded. */
        seaf_warning ("parent_dir %s doesn't exist in repo %s.\n",
                      parent_dir, store_id);
        return FALSE;
    }

    /* Linear scan of the direct children only (non-recursive). */
    for (p = dir->entries; p != NULL; p = p->next) {
        dent = p->data;
        int r = strcmp (dent->name, filename);
        if (r == 0) {
            ret = TRUE;
            if (mode) {
                *mode = dent->mode;
            }
            break;
        }
    }

    seaf_dir_free (dir);
    return ret;
}

/**
 * Various online file/directory operations:
 *
 * Put a file:
 * 1. find parent seafdir
 * 2. add a new dirent to parent seafdir
 * 3. recursively update all seafdir in the path, in a bottom-up manner
 * 4. commit it
 *
 * Del a file/dir: basically the same as put a file
 *
 * copy a file/dir:
 * 1. get src dirent from src repo
 * 2. duplicate src dirent with the new file name
 * 3. put the new dirent to dst repo and commit it.
 *
 * Move a file/dir: basically the same as a copy operation. Just one more step:
 * 4. remove src dirent from src repo and commit it
 *
 * Rename a file/dir:
 * 1. find parent seafdir
 * 2. update this seafdir with the old dirent replaced by a new dirent.
 * 3. recursively update all seafdir in the path
 *
 * NOTE:
 * All operations which add a new dirent would check if a dirent with the
 * same name already exists. If found, they would raise errors.
 *
 * All operations which remove a dirent would check if the dirent to be
 * removed already exists. If not, they would do nothing and just return OK.
*/ #define GET_REPO_OR_FAIL(repo_var,repo_id) \ do { \ repo_var = seaf_repo_manager_get_repo (seaf->repo_mgr, (repo_id)); \ if (!(repo_var)) { \ seaf_warning ("Repo %s doesn't exist.\n", (repo_id)); \ g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo"); \ ret = -1; \ goto out; \ } \ } while (0); #define GET_COMMIT_OR_FAIL(commit_var,repo_id,repo_version,commit_id) \ do { \ commit_var = seaf_commit_manager_get_commit(seaf->commit_mgr, (repo_id), (repo_version), (commit_id)); \ if (!(commit_var)) { \ seaf_warning ("commit %s:%s doesn't exist.\n", (repo_id), (commit_id)); \ g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid commit"); \ ret = -1; \ goto out; \ } \ } while (0); #define FAIL_IF_FILE_EXISTS(store_id,repo_version,root_id,parent_dir,filename,mode) \ do { \ if (check_file_exists ((store_id), (repo_version), (root_id), (parent_dir), (filename), (mode))) { \ g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \ "file already exists"); \ ret = -1; \ goto out; \ } \ } while (0); #define FAIL_IF_FILE_NOT_EXISTS(store_id,repo_version,root_id,parent_dir,filename,mode) \ do { \ if (!check_file_exists ((store_id), (repo_version), (root_id), (parent_dir), (filename), (mode))) { \ g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \ "file does not exist"); \ ret = -1; \ goto out; \ } \ } while (0); #define STD_FILE_MODE (S_IFREG | 0644) static char * gen_merge_description (SeafRepo *repo, const char *merged_root, const char *p1_root, const char *p2_root) { GList *p; GList *results = NULL; char *desc; diff_merge_roots (repo->store_id, repo->version, merged_root, p1_root, p2_root, &results, TRUE); desc = diff_results_to_description (results); for (p = results; p; p = p->next) { DiffEntry *de = p->data; diff_entry_free (de); } g_list_free (results); return desc; } static int gen_new_commit (const char *repo_id, SeafCommit *base, const char *new_root, const char *user, const char *desc, char *new_commit_id, gboolean 
handle_concurrent_update, gboolean check_gc, const char *last_gc_id, GError **error) { #define MAX_RETRY_COUNT 10 SeafRepo *repo = NULL; SeafCommit *new_commit = NULL, *current_head = NULL, *merged_commit = NULL; int retry_cnt = 0; gboolean gc_conflict = FALSE; int ret = 0; repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { seaf_warning ("Repo %s doesn't exist.\n", repo_id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Invalid repo"); ret = -1; goto out; } /* Create a new commit pointing to new_root. */ new_commit = seaf_commit_new(NULL, repo->id, new_root, user, EMPTY_SHA1, desc, 0); new_commit->parent_id = g_strdup (base->commit_id); seaf_repo_to_commit (repo, new_commit); if (seaf_commit_manager_add_commit (seaf->commit_mgr, new_commit) < 0) { seaf_warning ("Failed to add commit.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to add commit"); ret = -1; goto out; } retry: current_head = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id, repo->version, repo->head->commit_id); if (!current_head) { seaf_warning ("Failed to find head commit %s of %s.\n", repo->head->commit_id, repo_id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Invalid repo"); ret = -1; goto out; } /* Merge if base and head are not the same. 
*/ if (strcmp (base->commit_id, current_head->commit_id) != 0) { if (!handle_concurrent_update) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_CONCURRENT_UPLOAD, "Concurrent upload"); ret = -1; goto out; } MergeOptions opt; const char *roots[3]; char *desc = NULL; memset (&opt, 0, sizeof(opt)); opt.n_ways = 3; memcpy (opt.remote_repo_id, repo_id, 36); memcpy (opt.remote_head, new_commit->commit_id, 40); opt.do_merge = TRUE; roots[0] = base->root_id; /* base */ roots[1] = current_head->root_id; /* head */ roots[2] = new_root; /* remote */ if (seaf_merge_trees (repo->store_id, repo->version, 3, roots, &opt) < 0) { seaf_warning ("Failed to merge.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Internal error"); ret = -1; goto out; } seaf_debug ("Number of dirs visted in merge %.8s: %d.\n", repo_id, opt.visit_dirs); if (!opt.conflict) desc = g_strdup("Auto merge by system"); else { desc = gen_merge_description (repo, opt.merged_tree_root, current_head->root_id, new_root); if (!desc) desc = g_strdup("Auto merge by system"); } merged_commit = seaf_commit_new(NULL, repo->id, opt.merged_tree_root, user, EMPTY_SHA1, desc, 0); g_free (desc); merged_commit->parent_id = g_strdup (current_head->commit_id); merged_commit->second_parent_id = g_strdup (new_commit->commit_id); merged_commit->new_merge = TRUE; if (opt.conflict) merged_commit->conflict = TRUE; seaf_repo_to_commit (repo, merged_commit); if (seaf_commit_manager_add_commit (seaf->commit_mgr, merged_commit) < 0) { seaf_warning ("Failed to add commit.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to add commit"); ret = -1; goto out; } } else { seaf_commit_ref (new_commit); merged_commit = new_commit; } seaf_branch_set_commit(repo->head, merged_commit->commit_id); if (seaf_db_type(seaf->db) == SEAF_DB_TYPE_SQLITE) check_gc = FALSE; if (check_gc) gc_conflict = FALSE; if (seaf_branch_manager_test_and_update_branch(seaf->branch_mgr, repo->head, current_head->commit_id, check_gc, last_gc_id, 
                                                   repo->store_id,
                                                   &gc_conflict) < 0) {
        if (check_gc && gc_conflict) {
            seaf_warning ("Head branch update for repo %s conflicts with GC.\n",
                          repo->id);
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GC_CONFLICT,
                         "GC Conflict");
            ret = -1;
            goto out;
        }

        if (!handle_concurrent_update) {
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_CONCURRENT_UPLOAD,
                         "Concurrent upload");
            ret = -1;
            goto out;
        }

        /* Lost the branch-update race: drop all per-attempt references
         * and retry from a freshly loaded repo/head. */
        seaf_repo_unref (repo);
        repo = NULL;
        seaf_commit_unref (current_head);
        current_head = NULL;
        seaf_commit_unref (merged_commit);
        merged_commit = NULL;

        if (++retry_cnt <= MAX_RETRY_COUNT) {
            /* Sleep random time between 0 and 3 seconds. */
            usleep (g_random_int_range(0, 30) * 100 * 1000);

            repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
            if (!repo) {
                seaf_warning ("Repo %s doesn't exist.\n", repo_id);
                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                             "Invalid repo");
                ret = -1;
                goto out;
            }

            goto retry;
        } else {
            seaf_warning ("Stop updating repo %s after %d retries.\n",
                          repo_id, MAX_RETRY_COUNT);
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                         "Concurrent update");
            ret = -1;
            goto out;
        }
    }

    if (new_commit_id)
        memcpy (new_commit_id, merged_commit->commit_id, 41);

out:
    /* All of these are NULL-safe; only release what was acquired. */
    seaf_commit_unref (new_commit);
    seaf_commit_unref (current_head);
    seaf_commit_unref (merged_commit);
    seaf_repo_unref (repo);

    return ret;
}

/* Queue an asynchronous recomputation of @repo_id's size. */
static void
update_repo_size(const char *repo_id)
{
    schedule_repo_size_computation (seaf->size_sched, repo_id);
}

/*
 * Add a new file to @parent_dir in @repo_id.  The file content is read
 * from the local temporary file @temp_file_path, indexed into blocks
 * (encrypted if the repo is encrypted), and committed as @file_name with
 * @user as the author.  On a name clash a unique " (N)" name is used.
 * Retries on concurrent-upload commit conflicts.
 * Returns 0 on success, -1 on error (with @error set).
 */
int
seaf_repo_manager_post_file (SeafRepoManager *mgr,
                             const char *repo_id,
                             const char *temp_file_path,
                             const char *parent_dir,
                             const char *file_name,
                             const char *user,
                             GError **error)
{
    SeafRepo *repo = NULL;
    SeafCommit *head_commit = NULL;
    char *canon_path = NULL;
    unsigned char sha1[20];
    char buf[SEAF_PATH_MAX];
    char *root_id = NULL;
    SeafileCrypt *crypt = NULL;
    SeafDirent *new_dent = NULL;
    char hex[41];
    char *gc_id = NULL;
    int ret = 0;
    int retry_cnt = 0;

    if (g_access (temp_file_path, R_OK) != 0) {
        seaf_warning ("[post file] File %s doesn't exist or not readable.\n",
                      temp_file_path);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid input file");
        return -1;
    }

    GET_REPO_OR_FAIL(repo, repo_id);
    GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version,
                       repo->head->commit_id);

    if (!canon_path)
        canon_path = get_canonical_path (parent_dir);

    if (should_ignore_file (file_name, NULL)) {
        seaf_debug ("[post file] Invalid filename %s.\n", file_name);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid filename");
        ret = -1;
        goto out;
    }

    if (strstr (parent_dir, "//") != NULL) {
        seaf_debug ("[post file] parent_dir cantains // sequence.\n");
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid parent dir");
        ret = -1;
        goto out;
    }

    /* Write blocks. */
    if (repo->encrypted) {
        unsigned char key[32], iv[16];
        /* Blocks of an encrypted repo are encrypted with the repo key,
         * which must have been supplied by the user beforehand. */
        if (seaf_passwd_manager_get_decrypt_key_raw (seaf->passwd_mgr,
                                                     repo_id, user,
                                                     key, iv) < 0) {
            seaf_debug ("Passwd for repo %s is not set.\n", repo_id);
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                         "Passwd is not set");
            ret = -1;
            goto out;
        }
        crypt = seafile_crypt_new (repo->enc_version, key, iv);
    }

    /* Remember the GC id before indexing, so gen_new_commit() can detect
     * a GC run racing with this upload. */
    gc_id = seaf_repo_get_current_gc_id (repo);

    gint64 size;
    if (seaf_fs_manager_index_blocks (seaf->fs_mgr,
                                      repo->store_id, repo->version,
                                      temp_file_path,
                                      sha1, &size, crypt, TRUE, FALSE,
                                      NULL) < 0) {
        seaf_warning ("failed to index blocks");
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Failed to index blocks");
        ret = -1;
        goto out;
    }

    rawdata_to_hex(sha1, hex, 20);
    new_dent = seaf_dirent_new (dir_version_from_repo_version (repo->version),
                                hex, STD_FILE_MODE, file_name,
                                (gint64)time(NULL), user, size);

retry:
    root_id = do_post_file (repo, head_commit->root_id, canon_path, new_dent);
    if (!root_id) {
        seaf_warning ("[post file] Failed to post file %s to %s in repo %s.\n",
                      file_name, canon_path, repo->id);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Failed to post file");
        ret = -1;
        goto out;
    }

    snprintf(buf, SEAF_PATH_MAX, "Added \"%s\"", file_name);
    if (gen_new_commit (repo_id, head_commit, root_id, user, buf,
                        NULL, FALSE, TRUE, gc_id, error) < 0) {
        /* Only a concurrent-upload conflict is retried; anything else
         * (including GC conflict) is a hard failure. */
        if (*error == NULL || (*error)->code != SEAF_ERR_CONCURRENT_UPLOAD) {
            ret = -1;
            goto out;
        }
        retry_cnt++;
        seaf_debug ("[post file] Concurrent upload retry :%d\n", retry_cnt);

        /* Sleep random time between 0 and 3 seconds. */
        usleep (g_random_int_range(0, 30) * 100 * 1000);

        g_free (root_id);
        g_clear_error (error);
        seaf_repo_unref (repo);
        seaf_commit_unref(head_commit);
        GET_REPO_OR_FAIL(repo, repo_id);
        GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version,
                           repo->head->commit_id);
        goto retry;
    }

    seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL);

out:
    if (repo)
        seaf_repo_unref (repo);
    if (head_commit)
        seaf_commit_unref(head_commit);
    seaf_dirent_free (new_dent);
    g_free (root_id);
    g_free (canon_path);
    g_free (crypt);
    g_free (gc_id);

    if (ret == 0)
        update_repo_size(repo_id);

    return ret;
}

/*
 * Merge the dirents in @dents into *@entries (keeping the list sorted per
 * compare_dirents), re-authoring each as @user.  When @replace_existed is
 * non-zero, a same-named existing entry is removed first; otherwise a
 * unique " (N)" name is generated.  The final names are appended to
 * *@name_list (ownership of the name strings transfers to that list).
 * Returns 0 on success, -1 if unique-name generation fails (in which
 * case *@entries may already be partially updated; caller frees).
 */
static int
add_new_entries (SeafRepo *repo, const char *user,
                 GList **entries, GList *dents, int replace_existed,
                 GList **name_list)
{
    GList *ptr;
    SeafDirent *dent;

    for (ptr = dents; ptr; ptr = ptr->next) {
        dent = ptr->data;

        char *unique_name;
        SeafDirent *newdent;
        gboolean replace = FALSE;

        if (replace_existed) {
            GList *p;
            SeafDirent *tmp_dent;
            for (p = *entries; p; p = p->next) {
                tmp_dent = p->data;
                if (strcmp(tmp_dent->name, dent->name) == 0) {
                    replace = TRUE;
                    /* Drop the old entry; safe because we break out of the
                     * iteration immediately after unlinking. */
                    *entries = g_list_delete_link (*entries, p);
                    seaf_dirent_free (tmp_dent);
                    break;
                }
            }
        }

        if (replace)
            unique_name = g_strdup (dent->name);
        else
            unique_name = generate_unique_filename (dent->name, *entries);

        if (unique_name != NULL) {
            newdent = seaf_dirent_new (dir_version_from_repo_version(repo->version),
                                       dent->id, dent->mode, unique_name,
                                       dent->mtime, user, dent->size);

            *entries = g_list_insert_sorted (*entries, newdent,
                                             compare_dirents);
            *name_list = g_list_append (*name_list, unique_name);
            /* No need to free unique_name */
        } else {
            return -1;
        }
    }

    return 0;
}

static char *
post_multi_files_recursive (SeafRepo *repo, const char *dir_id, const char
*to_path, GList *dents, const char *user, int replace_existed, GList **name_list) { SeafDir *olddir, *newdir; SeafDirent *dent; GList *ptr; char *slash; char *to_path_dup = NULL; char *remain = NULL; char *id = NULL; char *ret = NULL; olddir = seaf_fs_manager_get_seafdir_sorted(seaf->fs_mgr, repo->store_id, repo->version, dir_id); if (!olddir) return NULL; /* we reach the target dir. new dir entry is added */ if (*to_path == '\0') { GList *newentries; newentries = dup_seafdir_entries (olddir->entries); if (add_new_entries (repo, user, &newentries, dents, replace_existed, name_list) < 0) goto out; newdir = seaf_dir_new (NULL, newentries, dir_version_from_repo_version(repo->version)); if (seaf_dir_save (seaf->fs_mgr, repo->store_id, repo->version, newdir) == 0) ret = g_strdup (newdir->dir_id); seaf_dir_free (newdir); goto out; } to_path_dup = g_strdup (to_path); slash = strchr (to_path_dup, '/'); if (!slash) { remain = to_path_dup + strlen(to_path_dup); } else { *slash = '\0'; remain = slash + 1; } for (ptr = olddir->entries; ptr; ptr = ptr->next) { dent = (SeafDirent *)ptr->data; if (strcmp(dent->name, to_path_dup) != 0) continue; id = post_multi_files_recursive (repo, dent->id, remain, dents, user, replace_existed, name_list); if (id != NULL) { memcpy(dent->id, id, 40); dent->id[40] = '\0'; if (repo->version > 0) dent->mtime = (guint64)time(NULL); } break; } if (id != NULL) { /* Create a new SeafDir. 
*/ GList *new_entries; new_entries = dup_seafdir_entries (olddir->entries); newdir = seaf_dir_new (NULL, new_entries, dir_version_from_repo_version(repo->version)); if (seaf_dir_save (seaf->fs_mgr, repo->store_id, repo->version, newdir) == 0) ret = g_strdup (newdir->dir_id); seaf_dir_free (newdir); } out: g_free (to_path_dup); g_free (id); seaf_dir_free(olddir); return ret; } static char * do_post_multi_files (SeafRepo *repo, const char *root_id, const char *parent_dir, GList *filenames, GList *id_list, GList *size_list, const char *user, int replace_existed, gint64 mtime, GList **name_list) { SeafDirent *dent; GList *dents = NULL; GList *ptr1, *ptr2, *ptr3; char *ret; for (ptr1 = filenames, ptr2 = id_list, ptr3 = size_list; ptr1 && ptr2 && ptr3; ptr1 = ptr1->next, ptr2 = ptr2->next, ptr3 = ptr3->next) { char *name = ptr1->data; char *id = ptr2->data; gint64 *size = ptr3->data; dent = g_new0 (SeafDirent, 1); dent->name = name; memcpy(dent->id, id, 40); dent->id[40] = '\0'; dent->size = *size; dent->mode = STD_FILE_MODE; if (mtime > 0) { dent->mtime = mtime; } else { dent->mtime = (gint64)time(NULL); } dents = g_list_append (dents, dent); } /* if parent_dir is a absolutely path, we will remove the first '/' */ if (*parent_dir == '/') parent_dir = parent_dir + 1; ret = post_multi_files_recursive(repo, root_id, parent_dir, dents, user, replace_existed, name_list); g_list_free_full (dents, g_free); return ret; } static GList * json_to_file_list (const char *files_json) { json_t *array; GList *files = NULL; json_error_t jerror; size_t index; json_t *value; const char *file; char *norm_file; array = json_loadb (files_json, strlen(files_json), 0, &jerror); if (!array) { seaf_warning ("Failed to load json file list: %s.\n", jerror.text); return NULL; } size_t n = json_array_size (array); for (index = 0; index < n; index++) { value = json_array_get (array, index); file = json_string_value (value); if (!file) { g_list_free_full (files, g_free); files = NULL; break; } 
norm_file = normalize_utf8_path (file); if (!norm_file) { g_list_free_full (files, g_free); files = NULL; break; } files = g_list_prepend (files, norm_file); } json_decref (array); return g_list_reverse(files); } /* * Return [{'name': 'file1', 'id': 'id1', 'size': num1}, {'name': 'file2', 'id': 'id2', 'size': num2}] */ static char * format_json_ret (GList *name_list, GList *id_list, GList *size_list) { json_t *array, *obj; GList *ptr, *ptr2; GList *sptr; char *filename, *id; gint64 *size; char *json_data; char *ret; array = json_array (); for (ptr = name_list, ptr2 = id_list, sptr = size_list; ptr && ptr2 && sptr; ptr = ptr->next, ptr2 = ptr2->next, sptr = sptr->next) { filename = ptr->data; id = ptr2->data; size = sptr->data; obj = json_object (); json_object_set_string_member (obj, "name", filename); json_object_set_string_member (obj, "id", id); json_object_set_int_member (obj, "size", *size); json_array_append_new (array, obj); } json_data = json_dumps (array, 0); json_decref (array); ret = g_strdup (json_data); free (json_data); return ret; } static gboolean check_files_with_same_name (SeafRepo *repo, const char *parent_dir, GList *filenames) { char *canon_path = NULL; SeafCommit *commit = NULL; SeafDir *dir = NULL; gboolean ret = FALSE; commit = seaf_commit_manager_get_commit(seaf->commit_mgr, repo->id, repo->version, repo->head->commit_id); if (!commit) { seaf_warning ("commit %s:%s doesn't exist.\n", repo->id, repo->head->commit_id); goto out; } canon_path = get_canonical_path (parent_dir); dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr, repo->store_id, repo->version, commit->root_id, canon_path, NULL); if (!dir) { goto out; } GList *ptr; for (ptr = filenames; ptr; ptr = ptr->next) { char *name = ptr->data; char *unique_name = NULL; unique_name = generate_unique_filename (name, dir->entries); if (!unique_name) { ret = TRUE; goto out; } g_free (unique_name); } out: g_free (canon_path); seaf_dir_free (dir); seaf_commit_unref (commit); return ret; } 
int seaf_repo_manager_post_multi_files (SeafRepoManager *mgr, const char *repo_id, const char *parent_dir, const char *filenames_json, const char *paths_json, const char *user, int replace_existed, gint64 mtime, char **ret_json, char **task_id, GError **error) { SeafRepo *repo = NULL; char *canon_path = NULL; GList *filenames = NULL, *paths = NULL, *id_list = NULL, *size_list = NULL, *ptr; char *filename, *path; char *gc_id = NULL; unsigned char sha1[20]; SeafileCrypt *crypt = NULL; char hex[41]; int ret = 0; GET_REPO_OR_FAIL(repo, repo_id); canon_path = get_canonical_path (parent_dir); /* Decode file name and tmp file paths from json. */ filenames = json_to_file_list (filenames_json); paths = json_to_file_list (paths_json); if (!filenames || !paths) { seaf_debug ("[post files] Invalid filenames or paths.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid files"); ret = -1; goto out; } if (!replace_existed && check_files_with_same_name (repo, parent_dir, filenames)) { seaf_debug ("[post files] Too many files with same name.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_FILES_WITH_SAME_NAME, "Too many files with same name"); ret = -1; goto out; } /* Check inputs. */ for (ptr = filenames; ptr; ptr = ptr->next) { filename = ptr->data; if (should_ignore_file (filename, NULL)) { seaf_debug ("[post files] Invalid filename %s.\n", filename); g_set_error (error, SEAFILE_DOMAIN, POST_FILE_ERR_FILENAME, "%s", filename); ret = -1; goto out; } } if (strstr (parent_dir, "//") != NULL) { seaf_debug ("[post file] parent_dir cantains // sequence.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid parent dir"); ret = -1; goto out; } /* Index tmp files and get file id list. 
*/ if (repo->encrypted) { unsigned char key[32], iv[16]; if (seaf_passwd_manager_get_decrypt_key_raw (seaf->passwd_mgr, repo_id, user, key, iv) < 0) { seaf_debug ("Passwd for repo %s is not set.\n", repo_id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Passwd is not set"); ret = -1; goto out; } crypt = seafile_crypt_new (repo->enc_version, key, iv); } if (!task_id) { gint64 *size; gc_id = seaf_repo_get_current_gc_id(repo); for (ptr = paths; ptr; ptr = ptr->next) { path = ptr->data; size = g_new (gint64, 1); if (seaf_fs_manager_index_blocks (seaf->fs_mgr, repo->store_id, repo->version, path, sha1, size, crypt, TRUE, FALSE, NULL) < 0) { seaf_warning ("failed to index blocks"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to index blocks"); ret = -1; goto out; } rawdata_to_hex(sha1, hex, 20); id_list = g_list_prepend (id_list, g_strdup(hex)); size_list = g_list_prepend (size_list, size); } id_list = g_list_reverse (id_list); size_list = g_list_reverse (size_list); ret = post_files_and_gen_commit (filenames, repo->id, user, ret_json, replace_existed, canon_path, id_list, size_list, mtime, gc_id, error); } else { ret = index_blocks_mgr_start_index (seaf->index_blocks_mgr, filenames, paths, repo_id, user, replace_existed, ret_json == NULL ? 
FALSE : TRUE, canon_path, crypt, task_id); } out: if (repo) seaf_repo_unref (repo); string_list_free (filenames); string_list_free (paths); string_list_free (id_list); for (ptr = size_list; ptr; ptr = ptr->next) g_free (ptr->data); g_list_free (size_list); g_free (canon_path); g_free (crypt); g_free (gc_id); return ret; } int post_files_and_gen_commit (GList *filenames, const char *repo_id, const char *user, char **ret_json, int replace_existed, const char *canon_path, GList *id_list, GList *size_list, gint64 mtime, char *last_gc_id, GError **error) { SeafRepo *repo = NULL; GList *name_list = NULL; GString *buf = g_string_new (NULL); SeafCommit *head_commit = NULL; char *root_id = NULL; int ret = 0; int retry_cnt = 0; gboolean handle_concurrent_update = TRUE; if (replace_existed == 0) { handle_concurrent_update = FALSE; } GET_REPO_OR_FAIL(repo, repo_id); GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id); retry: /* Add the files to parent dir and commit. */ root_id = do_post_multi_files (repo, head_commit->root_id, canon_path, filenames, id_list, size_list, user, replace_existed, mtime, &name_list); if (!root_id) { seaf_warning ("[post multi-file] Failed to post files to %s in repo %s.\n", canon_path, repo->id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, "Failed to put file"); ret = -1; goto out; } guint len = g_list_length (filenames); if (len > 1) g_string_printf (buf, "Added \"%s\" and %u more files.", (char *)(filenames->data), len - 1); else g_string_printf (buf, "Added \"%s\".", (char *)(filenames->data)); if (gen_new_commit (repo->id, head_commit, root_id, user, buf->str, NULL, handle_concurrent_update, TRUE, last_gc_id, error) < 0) { if (*error == NULL || (*error)->code != SEAF_ERR_CONCURRENT_UPLOAD) { ret = -1; goto out; } retry_cnt++; seaf_debug ("[post multi-file] Concurrent upload retry :%d\n", retry_cnt); /* Sleep random time between 0 and 3 seconds. 
*/ usleep (g_random_int_range(0, 30) * 100 * 1000); g_free (root_id); g_clear_error (error); seaf_repo_unref (repo); seaf_commit_unref(head_commit); GET_REPO_OR_FAIL(repo, repo_id); GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id); goto retry; } seaf_repo_manager_merge_virtual_repo (seaf->repo_mgr, repo->id, NULL); if (ret_json) *ret_json = format_json_ret (name_list, id_list, size_list); update_repo_size(repo->id); out: if (repo) seaf_repo_unref (repo); if (head_commit) seaf_commit_unref(head_commit); string_list_free (name_list); g_string_free (buf, TRUE); g_free (root_id); return ret; } /* int */ /* seaf_repo_manager_post_file_blocks (SeafRepoManager *mgr, */ /* const char *repo_id, */ /* const char *parent_dir, */ /* const char *file_name, */ /* const char *blockids_json, */ /* const char *paths_json, */ /* const char *user, */ /* gint64 file_size, */ /* int replace_existed, */ /* char **new_id, */ /* GError **error) */ /* { */ /* SeafRepo *repo = NULL; */ /* SeafCommit *head_commit = NULL; */ /* char *canon_path = NULL; */ /* unsigned char sha1[20]; */ /* char buf[SEAF_PATH_MAX]; */ /* char *root_id = NULL; */ /* SeafDirent *new_dent = NULL; */ /* GList *blockids = NULL, *paths = NULL, *ptr; */ /* char hex[41]; */ /* int ret = 0; */ /* blockids = json_to_file_list (blockids_json); */ /* paths = json_to_file_list (paths_json); */ /* if (g_list_length(blockids) != g_list_length(paths)) { */ /* seaf_debug ("[post-blks] Invalid blockids or paths.\n"); */ /* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid files"); */ /* ret = -1; */ /* goto out; */ /* } */ /* for (ptr = paths; ptr; ptr = ptr->next) { */ /* char *temp_file_path = ptr->data; */ /* if (g_access (temp_file_path, R_OK) != 0) { */ /* seaf_warning ("[post-blks] File block %s doesn't exist or not readable.\n", */ /* temp_file_path); */ /* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */ /* "Invalid input file"); */ /* ret = -1; */ /* goto out; */ /* } 
*/ /* } */ /* GET_REPO_OR_FAIL(repo, repo_id); */ /* GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id); */ /* if (!canon_path) */ /* canon_path = get_canonical_path (parent_dir); */ /* if (should_ignore_file (file_name, NULL)) { */ /* seaf_debug ("[post-blks] Invalid filename %s.\n", file_name); */ /* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */ /* "Invalid filename"); */ /* ret = -1; */ /* goto out; */ /* } */ /* if (strstr (parent_dir, "//") != NULL) { */ /* seaf_debug ("[post-blks] parent_dir cantains // sequence.\n"); */ /* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */ /* "Invalid parent dir"); */ /* ret = -1; */ /* goto out; */ /* } */ /* /\* Write blocks. *\/ */ /* if (seaf_fs_manager_index_file_blocks (seaf->fs_mgr, */ /* repo->store_id, repo->version, */ /* paths, */ /* blockids, sha1, file_size) < 0) { */ /* seaf_warning ("Failed to index file blocks"); */ /* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, */ /* "Failed to index blocks"); */ /* ret = -1; */ /* goto out; */ /* } */ /* rawdata_to_hex(sha1, hex, 20); */ /* new_dent = seaf_dirent_new (dir_version_from_repo_version(repo->version), */ /* hex, STD_FILE_MODE, file_name, */ /* (gint64)time(NULL), user, file_size); */ /* root_id = do_post_file_replace (repo, head_commit->root_id, */ /* canon_path, replace_existed, new_dent); */ /* if (!root_id) { */ /* seaf_warning ("[post-blks] Failed to post file to %s in repo %s.\n", */ /* canon_path, repo->id); */ /* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, */ /* "Failed to put file"); */ /* ret = -1; */ /* goto out; */ /* } */ /* *new_id = g_strdup(hex); */ /* snprintf(buf, SEAF_PATH_MAX, "Added \"%s\"", file_name); */ /* if (gen_new_commit (repo_id, head_commit, root_id, */ /* user, buf, NULL, error) < 0) */ /* ret = -1; */ /* out: */ /* if (repo) */ /* seaf_repo_unref (repo); */ /* if (head_commit) */ /* seaf_commit_unref(head_commit); */ /* string_list_free (blockids); */ /* 
string_list_free (paths); */ /* seaf_dirent_free (new_dent); */ /* g_free (root_id); */ /* g_free (canon_path); */ /* if (ret == 0) */ /* update_repo_size(repo_id); */ /* return ret; */ /* } */ int seaf_repo_manager_post_blocks (SeafRepoManager *mgr, const char *repo_id, const char *blockids_json, const char *paths_json, const char *user, GError **error) { SeafRepo *repo = NULL; GList *blockids = NULL, *paths = NULL, *ptr; int ret = 0; blockids = json_to_file_list (blockids_json); paths = json_to_file_list (paths_json); if (g_list_length(blockids) != g_list_length(paths)) { seaf_warning ("[post-blks] Invalid blockids or paths.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid files"); ret = -1; goto out; } for (ptr = paths; ptr; ptr = ptr->next) { char *temp_file_path = ptr->data; if (g_access (temp_file_path, R_OK) != 0) { seaf_warning ("[post-blks] File block %s doesn't exist or not readable.\n", temp_file_path); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid input file"); ret = -1; goto out; } } GET_REPO_OR_FAIL(repo, repo_id); /* Write blocks. 
*/ if (seaf_fs_manager_index_raw_blocks (seaf->fs_mgr, repo->store_id, repo->version, paths, blockids) < 0) { seaf_warning ("Failed to index file blocks.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to index blocks"); ret = -1; goto out; } out: if (repo) seaf_repo_unref (repo); string_list_free (blockids); string_list_free (paths); if (ret == 0) update_repo_size(repo_id); return ret; } static int check_quota_before_commit_blocks (const char *store_id, int version, GList *blockids) { GList *ptr; char *blockid; gint64 total_size = 0; BlockMetadata *bmd; for (ptr = blockids; ptr; ptr = ptr->next) { blockid = ptr->data; bmd = seaf_block_manager_stat_block (seaf->block_mgr, store_id, version, blockid); if (!bmd) { seaf_warning ("Failed to stat block %s in store %s.\n", blockid, store_id); return -1; } total_size += (gint64)bmd->size; g_free (bmd); } return seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr, store_id, total_size); } int seaf_repo_manager_commit_file_blocks (SeafRepoManager *mgr, const char *repo_id, const char *parent_dir, const char *file_name, const char *blockids_json, const char *user, gint64 file_size, int replace_existed, gint64 mtime, char **new_id, GError **error) { SeafRepo *repo = NULL; SeafCommit *head_commit = NULL; char *canon_path = NULL; unsigned char sha1[20]; char buf[SEAF_PATH_MAX]; char *root_id = NULL; SeafDirent *new_dent = NULL; GList *blockids = NULL; char hex[41]; char *gc_id = NULL; int ret = 0; blockids = json_to_file_list (blockids_json); GET_REPO_OR_FAIL(repo, repo_id); GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id); if (!canon_path) canon_path = get_canonical_path (parent_dir); if (should_ignore_file (file_name, NULL)) { seaf_warning ("[post-blks] Invalid filename %s.\n", file_name); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid filename"); ret = -1; goto out; } if (strstr (parent_dir, "//") != NULL) { seaf_warning ("[post-blks] parent_dir 
cantains // sequence.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid parent dir"); ret = -1; goto out; } int rc = check_quota_before_commit_blocks (repo->store_id, repo->version, blockids); if (rc != 0) { g_set_error (error, SEAFILE_DOMAIN, POST_FILE_ERR_QUOTA_FULL, "Quota full"); ret = -1; goto out; } gc_id = seaf_repo_get_current_gc_id (repo); /* Write blocks. */ if (seaf_fs_manager_index_existed_file_blocks ( seaf->fs_mgr, repo->store_id, repo->version, blockids, sha1, file_size) < 0) { seaf_warning ("Failed to index existed file blocks.\n"); g_set_error (error, SEAFILE_DOMAIN, POST_FILE_ERR_BLOCK_MISSING, "Failed to index file blocks"); ret = -1; goto out; } rawdata_to_hex(sha1, hex, 20); if (mtime <= 0) { mtime = (gint64)time(NULL); } new_dent = seaf_dirent_new (dir_version_from_repo_version(repo->version), hex, STD_FILE_MODE, file_name, mtime, user, file_size); root_id = do_post_file_replace (repo, head_commit->root_id, canon_path, replace_existed, new_dent); if (!root_id) { seaf_warning ("[post-blks] Failed to post file to %s in repo %s.\n", canon_path, repo->id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to put file"); ret = -1; goto out; } *new_id = g_strdup(hex); snprintf(buf, SEAF_PATH_MAX, "Added \"%s\"", file_name); if (gen_new_commit (repo_id, head_commit, root_id, user, buf, NULL, TRUE, TRUE, gc_id, error) < 0) ret = -1; out: if (repo) seaf_repo_unref (repo); if (head_commit) seaf_commit_unref(head_commit); string_list_free (blockids); seaf_dirent_free (new_dent); g_free (root_id); g_free (canon_path); g_free (gc_id); if (ret == 0) update_repo_size(repo_id); return ret; } static char * del_file_recursive(SeafRepo *repo, const char *dir_id, const char *to_path, const char *filename, int *mode, int *p_deleted_num, char **desc_file) { SeafDir *olddir, *newdir; SeafDirent *dent; GList *ptr; char *to_path_dup = NULL; char *remain = NULL; char *slash; char *id = NULL; char *ret = NULL; int deleted_num = 0; olddir = 
seaf_fs_manager_get_seafdir_sorted(seaf->fs_mgr, repo->store_id, repo->version, dir_id); if (!olddir) return NULL; /* we reach the target dir. Remove the given entry from it. */ if (*to_path == '\0') { SeafDirent *old, *new; GList *newentries = NULL, *p; GList *filenames = NULL, *ptr; char *name; int found_flag; filenames = json_to_file_list (filename); if (!filenames) { seaf_dir_free(olddir); return NULL; } for (p = olddir->entries; p != NULL; p = p->next) { found_flag = 0; old = p->data; for (ptr = filenames; ptr; ptr = ptr->next) { name = ptr->data; if (strcmp(old->name, name) == 0) { found_flag = 1; deleted_num++; if (mode) *mode = old->mode; if (desc_file && *desc_file==NULL) *desc_file = g_strdup(old->name); break; } } if (!found_flag) { new = seaf_dirent_dup (old); newentries = g_list_prepend (newentries, new); } } string_list_free (filenames); if (deleted_num == 0) { ret = g_strdup(olddir->dir_id); if (newentries) g_list_free_full (newentries, (GDestroyNotify)seaf_dirent_free); goto out; } newentries = g_list_reverse (newentries); newdir = seaf_dir_new(NULL, newentries, dir_version_from_repo_version(repo->version)); if (seaf_dir_save(seaf->fs_mgr, repo->store_id, repo->version, newdir) == 0) ret = g_strdup(newdir->dir_id); seaf_dir_free(newdir); goto out; } to_path_dup = g_strdup (to_path); slash = strchr (to_path_dup, '/'); if (!slash) { remain = to_path_dup + strlen(to_path_dup); } else { *slash = '\0'; remain = slash + 1; } for (ptr = olddir->entries; ptr; ptr = ptr->next) { dent = (SeafDirent *)ptr->data; if (strcmp(dent->name, to_path_dup) != 0) continue; id = del_file_recursive(repo, dent->id, remain, filename, mode, &deleted_num, desc_file); if (id != NULL && deleted_num > 0) { memcpy(dent->id, id, 40); dent->id[40] = '\0'; if (repo->version > 0) dent->mtime = (guint64)time(NULL); } break; } if (id != NULL) { if (deleted_num == 0) { ret = g_strdup(olddir->dir_id); } else { /* Create a new SeafDir. 
*/ GList *new_entries; new_entries = dup_seafdir_entries (olddir->entries); newdir = seaf_dir_new (NULL, new_entries, dir_version_from_repo_version(repo->version)); if (seaf_dir_save (seaf->fs_mgr, repo->store_id, repo->version, newdir) == 0) ret = g_strdup (newdir->dir_id); seaf_dir_free (newdir); } } out: if (p_deleted_num) *p_deleted_num = deleted_num; g_free (to_path_dup); g_free (id); seaf_dir_free(olddir); return ret; } static char * do_del_file(SeafRepo *repo, const char *root_id, const char *parent_dir, const char *file_name, int *mode, int *deleted_num, char **desc_file) { /* if parent_dir is a absolutely path, we will remove the first '/' */ if (*parent_dir == '/') parent_dir = parent_dir + 1; return del_file_recursive(repo, root_id, parent_dir, file_name, mode, deleted_num, desc_file); } int seaf_repo_manager_del_file (SeafRepoManager *mgr, const char *repo_id, const char *parent_dir, const char *file_name, const char *user, GError **error) { SeafRepo *repo = NULL; SeafCommit *head_commit = NULL; SeafDir *dir = NULL; char *canon_path = NULL; char buf[SEAF_PATH_MAX]; char *root_id = NULL; char *desc_file = NULL; int mode = 0; int ret = 0; int deleted_num = 0; GET_REPO_OR_FAIL(repo, repo_id); GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id); if (!canon_path) canon_path = get_canonical_path (parent_dir); dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr, repo->store_id, repo->version, head_commit->root_id, canon_path, NULL); if (!dir) { seaf_warning ("parent_dir %s doesn't exist in repo %s.\n", canon_path, repo->store_id); ret = -1; goto out; } root_id = do_del_file (repo, head_commit->root_id, canon_path, file_name, &mode, &deleted_num, &desc_file); if (!root_id) { seaf_warning ("[del file] Failed to del file from %s in repo %s.\n", canon_path, repo->id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to del file"); ret = -1; goto out; } if (deleted_num == 0) { goto out; } /* Commit. 
*/ if (deleted_num > 1) { snprintf(buf, SEAF_PATH_MAX, "Deleted \"%s\" and %d more files", desc_file, deleted_num - 1); } else if (S_ISDIR(mode)) { snprintf(buf, SEAF_PATH_MAX, "Removed directory \"%s\"", desc_file); } else { snprintf(buf, SEAF_PATH_MAX, "Deleted \"%s\"", desc_file); } if (gen_new_commit (repo_id, head_commit, root_id, user, buf, NULL, TRUE, FALSE, NULL, error) < 0) { ret = -1; goto out; } seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL); out: if (repo) seaf_repo_unref (repo); if (head_commit) seaf_commit_unref(head_commit); if (dir) seaf_dir_free (dir); g_free (root_id); g_free (canon_path); g_free (desc_file); if (ret == 0) { update_repo_size (repo_id); } return ret; } void do_batch_del_files (ChangeSet *changeset, const char *file_list, int *mode, int *deleted_num, char **desc_file) { GList *filepaths = NULL, *ptr; char *name; filepaths = json_to_file_list (file_list); for (ptr = filepaths; ptr; ptr = ptr->next) { name = ptr->data; if (!name || g_strcmp0 (name, "") == 0) { continue; } char *canon_path = get_canonical_path (name); char *base_name= g_path_get_basename (canon_path); char *del_path = canon_path; if (canon_path[0] == '/') { del_path = canon_path + 1; } remove_from_changeset (changeset, del_path, FALSE, NULL, mode); (*deleted_num)++; if (desc_file && *desc_file == NULL) *desc_file = g_strdup (base_name); g_free (canon_path); g_free (base_name); } string_list_free (filepaths); } int seaf_repo_manager_batch_del_files (SeafRepoManager *mgr, const char *repo_id, const char *file_list, const char *user, GError **error) { SeafRepo *repo = NULL; SeafCommit *head_commit = NULL; SeafDir *dir = NULL; char buf[SEAF_PATH_MAX]; char *root_id = NULL; char *desc_file = NULL; ChangeSet *changeset = NULL; int mode = 0; int ret = 0; int deleted_num = 0; GET_REPO_OR_FAIL(repo, repo_id); GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id); dir = seaf_fs_manager_get_seafdir_sorted (seaf->fs_mgr, repo->store_id, 
repo->version, head_commit->root_id); if (!dir) { seaf_warning ("root dir doesn't exist in repo %s.\n", repo->store_id); ret = -1; goto out; } changeset = changeset_new (repo_id, dir); if (!changeset) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to batch del files"); ret = -1; goto out; } do_batch_del_files (changeset, file_list, &mode, &deleted_num, &desc_file); if (deleted_num == 0) { goto out; } root_id = commit_tree_from_changeset (changeset); if (!root_id) { seaf_warning ("Failed to commit changeset for repo %s.\n", repo_id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to batch del files"); ret = -1; goto out; } /* Commit. */ if (deleted_num > 1) { snprintf(buf, SEAF_PATH_MAX, "Deleted \"%s\" and %d more files", desc_file, deleted_num - 1); } else if (S_ISDIR(mode)) { snprintf(buf, SEAF_PATH_MAX, "Removed directory \"%s\"", desc_file); } else { snprintf(buf, SEAF_PATH_MAX, "Deleted \"%s\"", desc_file); } if (gen_new_commit (repo_id, head_commit, root_id, user, buf, NULL, TRUE, FALSE, NULL, error) < 0) { ret = -1; goto out; } seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL); out: changeset_free (changeset); if (repo) seaf_repo_unref (repo); if (head_commit) seaf_commit_unref(head_commit); if (dir) seaf_dir_free (dir); g_free (root_id); g_free (desc_file); if (ret == 0) { update_repo_size (repo_id); } return ret; } static SeafDirent * get_dirent_by_path (SeafRepo *repo, const char *root_id, const char *path, const char *file_name, GError **error) { SeafCommit *head_commit = NULL; SeafDirent *dent = NULL; SeafDir *dir = NULL; if (!root_id) { head_commit = seaf_commit_manager_get_commit(seaf->commit_mgr, repo->id, repo->version, repo->head->commit_id); if (!head_commit) { seaf_warning ("commit %s:%s doesn't exist.\n", repo->id, repo->head->commit_id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid commit"); goto out; } root_id = head_commit->root_id; } dir = seaf_fs_manager_get_seafdir_by_path 
(seaf->fs_mgr, repo->store_id, repo->version, root_id, path, NULL); if (!dir) { seaf_warning ("dir %s doesn't exist in repo %s.\n", path, repo->id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid dir"); goto out; } GList *p; for (p = dir->entries; p; p = p->next) { SeafDirent *d = p->data; int r = strcmp (d->name, file_name); if (r == 0) { dent = seaf_dirent_dup(d); break; } } if (!dent && error && !(*error)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "failed to get dirent"); } out: if (head_commit) seaf_commit_unref (head_commit); if (dir) seaf_dir_free (dir); return dent; } static int put_dirent_and_commit (SeafRepo *repo, const char *path, SeafDirent *dents[], int n_dents, int replace, const char *user, gboolean check_gc, const char *last_gc_id, GError **error) { SeafCommit *head_commit = NULL; char *root_id = NULL; char buf[SEAF_PATH_MAX]; int ret = 0, i = 0; GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id); root_id = head_commit->root_id; GList *dent_list = NULL; GList *name_list = NULL; for (i = 0; i < n_dents; i++) dent_list = g_list_append (dent_list, dents[i]); if (*path == '/') path = path + 1; root_id = post_multi_files_recursive (repo, root_id, path, dent_list, user, replace, &name_list); g_list_free (dent_list); g_list_free_full (name_list, (GDestroyNotify)g_free); if (!root_id) { if (n_dents > 1) seaf_warning ("[cp file] Failed to cp %s and other %d files to %s in repo %s.\n", dents[0]->name, n_dents - 1, path, repo->id); else seaf_warning ("[cp file] Failed to cp %s to %s in repo %s.\n", dents[0]->name, path, repo->id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to cp file"); ret = -1; goto out; } /* Commit. 
*/ if (n_dents > 1) { snprintf(buf, sizeof(buf), "Added \"%s\" and %d more files", dents[0]->name, n_dents - 1); } else if (S_ISDIR(dents[0]->mode)) { snprintf(buf, sizeof(buf), "Added directory \"%s\"", dents[0]->name); } else { snprintf(buf, sizeof(buf), "Added \"%s\"", dents[0]->name); } if (gen_new_commit (repo->id, head_commit, root_id, user, buf, NULL, TRUE, check_gc, last_gc_id, error) < 0) ret = -1; out: if (head_commit) seaf_commit_unref (head_commit); if (root_id) g_free (root_id); return ret; } static int write_block (const char *repo_id, const char *block_id, int version, const char *buf, int len) { SeafBlockManager *mgr = seaf->block_mgr; BlockHandle *handle; int n; /* Don't write if the block already exists. */ if (seaf_block_manager_block_exists (mgr, repo_id, version, block_id)) { return 0; } handle = seaf_block_manager_open_block (mgr, repo_id, version, block_id, BLOCK_WRITE); if (!handle) { seaf_warning ("Failed to open block %s.\n", block_id); return -1; } n = seaf_block_manager_write_block (mgr, handle, buf, len); if (n < 0) { seaf_warning ("Failed to write block %s.\n", block_id); seaf_block_manager_close_block (mgr, handle); seaf_block_manager_block_handle_free (mgr, handle); return -1; } if (seaf_block_manager_close_block (mgr, handle) < 0) { seaf_warning ("failed to close block %s.\n", block_id); seaf_block_manager_block_handle_free (mgr, handle); return -1; } if (seaf_block_manager_commit_block (mgr, handle) < 0) { seaf_warning ("failed to commit block %s.\n", block_id); seaf_block_manager_block_handle_free (mgr, handle); return -1; } seaf_block_manager_block_handle_free (mgr, handle); return 0; } // return a new block id. 
/*
 * Copy one block from @src_repo to @dst_repo, re-encrypting as needed.
 * If either repo is encrypted the block content changes, so the block gets
 * a new id (SHA-1 of the re-encrypted content); the new id is returned.
 * Returns a newly allocated block id string, or NULL on failure.
 * (Function continues past this chunk: the plain->plain branch and the
 * cleanup label follow.)
 */
static char *
copy_block_between_enc_repo (SeafRepo *src_repo, SeafRepo *dst_repo,
                             SeafileCrypt *src_crypt, SeafileCrypt *dst_crypt,
                             const char *block_id)
{
    SeafBlockManager *mgr = seaf->block_mgr;
    char *ret = NULL;
    BlockHandle *handle = NULL;
    BlockMetadata *bmd = NULL;
    char *buf = NULL;
    char *src_dec_out = NULL;
    int src_dec_out_len = -1;
    SHA_CTX ctx;
    uint8_t checksum[CHECKSUM_LENGTH];
    char checksum_str[41];   /* 40 hex chars + NUL */
    int block_size = 0;
    int n;

    /* The well-known empty block keeps its id unchanged. */
    if (g_strcmp0 (block_id, EMPTY_SHA1) == 0) {
        ret = g_strdup (block_id);
        goto out;
    }

    // Read block from source repo.
    handle = seaf_block_manager_open_block(mgr, src_repo->store_id,
                                           src_repo->version, block_id,
                                           BLOCK_READ);
    if (!handle) {
        seaf_warning ("Failed to open block %s.\n", block_id);
        return NULL;
    }

    bmd = seaf_block_manager_stat_block_by_handle (mgr, handle);
    if (!bmd) {
        seaf_warning ("Failed to stat block %s by handle.\n", block_id);
        goto out;
    }
    block_size = bmd->size;
    /* Zero-size block: nothing to re-encrypt, id stays the same. */
    if (block_size == 0) {
        ret = g_strdup (block_id);
        goto out;
    }

    buf = g_new (char, block_size);
    n = seaf_block_manager_read_block(seaf->block_mgr, handle, buf, block_size);
    if (n != block_size) {
        seaf_warning ("Failed to read block from source repo %s.\n",
                      src_repo->id);
        goto out;
    }

    if (src_crypt != NULL) {
        int rc = seafile_decrypt (&src_dec_out, &src_dec_out_len,
                                  buf, block_size, src_crypt);
        if (rc != 0) {
            seaf_warning ("Failed to decrypt block %s.\n", block_id);
            goto out;
        }
    }

    // Write block to destination repo.
    if (src_crypt && dst_crypt) {
        // Both source and destination repos are encrypted reops.
        char *dst_enc_buf = NULL;
        int dst_enc_len = -1;
        /* Re-encrypt the decrypted content with the destination key. */
        int rc = seafile_encrypt (&dst_enc_buf, &dst_enc_len,
                                  src_dec_out, src_dec_out_len, dst_crypt);
        if (rc != 0) {
            seaf_warning ("Failed to encrypt block for repo %s.\n",
                          dst_repo->id);
            goto out;
        }
        /* The new block id is the SHA-1 of the re-encrypted content. */
        SHA1_Init (&ctx);
        SHA1_Update (&ctx, dst_enc_buf, dst_enc_len);
        SHA1_Final (checksum, &ctx);
        rawdata_to_hex (checksum, checksum_str, 20);
        if (write_block (dst_repo->store_id, checksum_str, dst_repo->version,
                         dst_enc_buf, dst_enc_len) < 0) {
            g_free (dst_enc_buf);
            goto out;
        }
        g_free (dst_enc_buf);
        ret = g_strdup (checksum_str);
    } else if (src_crypt && !dst_crypt) {
        // Source repo is encrypted.
        /* Store the plaintext; its SHA-1 becomes the new block id. */
        SHA1_Init (&ctx);
        SHA1_Update (&ctx, src_dec_out, src_dec_out_len);
        SHA1_Final (checksum, &ctx);
        rawdata_to_hex (checksum, checksum_str, 20);
        if (write_block (dst_repo->store_id, checksum_str, dst_repo->version,
                         src_dec_out, src_dec_out_len) < 0) {
            goto out;
        }
        ret = g_strdup (checksum_str);
    } else if (!src_crypt && dst_crypt) {
        // Destination repo is encrypted.
        char *dst_enc_buf = NULL;
        int dst_enc_len = -1;
        int rc = seafile_encrypt (&dst_enc_buf, &dst_enc_len,
                                  buf, block_size, dst_crypt);
        if (rc != 0) {
            seaf_warning ("Failed to encrypt block for repo %s.\n",
                          dst_repo->id);
            goto out;
        }
        SHA1_Init (&ctx);
        SHA1_Update (&ctx, dst_enc_buf, dst_enc_len);
        SHA1_Final (checksum, &ctx);
        rawdata_to_hex (checksum, checksum_str, 20);
        if (write_block (dst_repo->store_id, checksum_str, dst_repo->version,
                         dst_enc_buf, dst_enc_len) < 0) {
            g_free (dst_enc_buf);
            goto out;
        }
        g_free (dst_enc_buf);
        ret = g_strdup (checksum_str);
    } else if (!src_crypt && !dst_crypt) {
        // Both source and destination repos are not encrypted reops.
if (write_block (dst_repo->store_id, block_id, dst_repo->version, buf, block_size) < 0) { goto out; } ret = g_strdup (checksum_str); } out: g_free (buf); g_free (src_dec_out); if (handle) { seaf_block_manager_close_block (mgr, handle); seaf_block_manager_block_handle_free (mgr, handle); } if (bmd) g_free (bmd); return ret; } static char * copy_seafile (SeafRepo *src_repo, SeafRepo *dst_repo, SeafileCrypt *src_crypt, SeafileCrypt *dst_crypt, const char *file_id, CopyTask *task, guint64 *size) { Seafile *file; file = seaf_fs_manager_get_seafile (seaf->fs_mgr, src_repo->store_id, src_repo->version, file_id); if (!file) { seaf_warning ("Failed to get file object %s from repo %s.\n", file_id, src_repo->id); return NULL; } /* We may be copying from v0 repo to v1 repo or vise versa. */ file->version = seafile_version_from_repo_version(dst_repo->version); int i; char *block_id; for (i = 0; i < file->n_blocks; ++i) { /* Check cancel before copying a block. */ if (task && g_atomic_int_get (&task->canceled)) { seafile_unref (file); return NULL; } block_id = file->blk_sha1s[i]; if (src_crypt != NULL || dst_crypt != NULL) { char *new_block_id = copy_block_between_enc_repo (src_repo, dst_repo, src_crypt, dst_crypt, block_id); if (new_block_id == NULL) { seaf_warning ("Failed to copy block %s from repo %s to %s.\n", block_id, src_repo->id, dst_repo->id); seafile_unref (file); return NULL; } g_free (file->blk_sha1s[i]); file->blk_sha1s[i] = new_block_id; } else { if (seaf_block_manager_copy_block (seaf->block_mgr, src_repo->store_id, src_repo->version, dst_repo->store_id, dst_repo->version, block_id) < 0) { seaf_warning ("Failed to copy block %s from repo %s to %s.\n", block_id, src_repo->id, dst_repo->id); seafile_unref (file); return NULL; } } } // Save fs after copy blocks, block_id may be changed when copy between encrypted repos. 
if (seafile_save (seaf->fs_mgr, dst_repo->store_id, dst_repo->version, file) < 0) { seaf_warning ("Failed to copy file object %s from repo %s to %s.\n", file_id, src_repo->id, dst_repo->id); seafile_unref (file); return NULL; } if (task) ++(task->done); *size = file->file_size; char *ret = g_strdup(file->file_id); seafile_unref (file); return ret; } static char * copy_recursive (SeafRepo *src_repo, SeafRepo *dst_repo, SeafileCrypt *src_crypt, SeafileCrypt *dst_crypt, const char *obj_id, guint32 mode, const char *modifier, CopyTask *task, guint64 *size) { if (S_ISREG(mode)) { return copy_seafile (src_repo, dst_repo, src_crypt, dst_crypt, obj_id, task, size); } else if (S_ISDIR(mode)) { SeafDir *src_dir = NULL, *dst_dir = NULL; GList *dst_ents = NULL, *ptr; char *new_id = NULL; SeafDirent *dent, *new_dent = NULL; src_dir = seaf_fs_manager_get_seafdir (seaf->fs_mgr, src_repo->store_id, src_repo->version, obj_id); if (!src_dir) { seaf_warning ("Seafdir %s doesn't exist in repo %s.\n", obj_id, src_repo->id); return NULL; } for (ptr = src_dir->entries; ptr; ptr = ptr->next) { dent = ptr->data; guint64 new_size = 0; new_id = copy_recursive (src_repo, dst_repo, src_crypt, dst_crypt, dent->id, dent->mode, modifier, task, &new_size); if (!new_id) { seaf_dir_free (src_dir); return NULL; } new_dent = seaf_dirent_new (dir_version_from_repo_version(dst_repo->version), new_id, dent->mode, dent->name, dent->mtime, modifier, new_size); dst_ents = g_list_prepend (dst_ents, new_dent); g_free (new_id); } dst_ents = g_list_reverse (dst_ents); seaf_dir_free (src_dir); dst_dir = seaf_dir_new (NULL, dst_ents, dir_version_from_repo_version(dst_repo->version)); if (seaf_dir_save (seaf->fs_mgr, dst_repo->store_id, dst_repo->version, dst_dir) < 0) { seaf_warning ("Failed to save new dir.\n"); seaf_dir_free (dst_dir); return NULL; } char *ret = g_strdup(dst_dir->dir_id); *size = 0; seaf_dir_free (dst_dir); return ret; } return NULL; } static GHashTable * get_sub_dirents_hash_map(SeafRepo 
*repo, const char *parent_dir) { GError *error; GList *p; SeafDir *dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr, repo->store_id, repo->version, repo->root_id, parent_dir, &error); if (!dir) { if (error) { seaf_warning ("Failed to get dir %s repo %.8s: %s.\n", parent_dir, repo->store_id, error->message); g_clear_error(&error); } else { seaf_warning ("dir %s doesn't exist in repo %.8s.\n", parent_dir, repo->store_id); } return NULL; } GHashTable *dirent_hash = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, (GDestroyNotify)seaf_dirent_free); for (p = dir->entries; p; p = p->next) { SeafDirent *d = p->data; g_hash_table_insert(dirent_hash, g_strdup(d->name), d); } g_list_free (dir->entries); g_free (dir->ondisk); g_free(dir); return dirent_hash; } static void set_failed_reason (char **failed_reason, char *err_str) { *failed_reason = g_strdup (err_str); } static SeafileCrypt * get_crypt_by_repo (SeafRepo *repo, const char *user) { char *key_hex, *iv_hex; unsigned char enc_key[32], enc_iv[16]; SeafileCryptKey *key = NULL; SeafileCrypt *crypt = NULL; key = seaf_passwd_manager_get_decrypt_key (seaf->passwd_mgr, repo->id, user); if (!key) { return NULL; } g_object_get (key, "key", &key_hex, "iv", &iv_hex, NULL); if (repo->enc_version == 1) hex_to_rawdata (key_hex, enc_key, 16); else hex_to_rawdata (key_hex, enc_key, 32); hex_to_rawdata (iv_hex, enc_iv, 16); crypt = seafile_crypt_new (repo->enc_version, enc_key, enc_iv); g_free (key_hex); g_free (iv_hex); g_object_unref (key); return crypt; } static int cross_repo_copy (const char *src_repo_id, const char *src_path, const char *src_filename, const char *dst_repo_id, const char *dst_path, const char *dst_filename, int replace, const char *modifier, CopyTask *task) { SeafRepo *src_repo = NULL, *dst_repo = NULL; SeafDirent **src_dents = NULL, **dst_dents = NULL; GList *src_names = NULL, *dst_names = NULL, *ptr; char *name; char *new_id = NULL; guint64 new_size = 0; int ret = 0, i = 0; int file_num = 0; 
    GHashTable *dirent_hash = NULL;
    gint64 total_size_all = 0;
    char *err_str = COPY_ERR_INTERNAL;
    int check_quota_ret;
    SeafileCrypt *src_crypt = NULL;
    SeafileCrypt *dst_crypt = NULL;
    char *gc_id = NULL;

    src_repo = seaf_repo_manager_get_repo (seaf->repo_mgr, src_repo_id);
    if (!src_repo) {
        err_str = COPY_ERR_INTERNAL;
        ret = -1;
        seaf_warning ("Failed to get source repo.\n");
        goto out;
    }

    /* Encrypted repos need a previously-supplied password to build a crypt. */
    if (src_repo->encrypted) {
        src_crypt = get_crypt_by_repo (src_repo, modifier);
        if (!src_crypt) {
            err_str = COPY_ERR_INTERNAL;
            ret = -1;
            seaf_warning ("The source repo is encrypted. Please provide password to view it.\n");
            goto out;
        }
    }

    dst_repo = seaf_repo_manager_get_repo (seaf->repo_mgr, dst_repo_id);
    if (!dst_repo) {
        err_str = COPY_ERR_INTERNAL;
        ret = -1;
        seaf_warning ("Failed to get destination repo.\n");
        goto out;
    }

    if (dst_repo->encrypted) {
        dst_crypt = get_crypt_by_repo (dst_repo, modifier);
        if (!dst_crypt) {
            err_str = COPY_ERR_INTERNAL;
            ret = -1;
            seaf_warning ("The destination repo is encrypted. Please provide password to view it.\n");
            goto out;
        }
    }

    src_names = json_to_file_list (src_filename);
    dst_names = json_to_file_list (dst_filename);
    file_num = g_list_length (src_names);
    /* Remember the GC id before copying; committing later verifies no GC
     * ran in between (check_gc=TRUE in put_dirent_and_commit). */
    gc_id = seaf_repo_get_current_gc_id (dst_repo);

    src_dents = g_new0 (SeafDirent *, file_num);
    dst_dents = g_new0 (SeafDirent *, file_num);

    /* src_dents entries are borrowed from dirent_hash (freed with it). */
    dirent_hash = get_sub_dirents_hash_map (src_repo, src_path);
    if (!dirent_hash) {
        err_str = COPY_ERR_INTERNAL;
        ret = -1;
        goto out;
    }

    gint64 total_files = -1;
    gint64 total_files_all = 0;
    /* check filename, size and file count */
    for (ptr = src_names; ptr; ptr = ptr->next) {
        name = ptr->data;
        if (strcmp(name, "") == 0) {
            err_str = COPY_ERR_BAD_ARG;
            ret = -1;
            seaf_warning ("[copy files] Bad args: Empty src_filename.\n");
            goto out;
        }
        src_dents[i] = g_hash_table_lookup (dirent_hash, name);
        if (!src_dents[i]) {
            err_str = COPY_ERR_INTERNAL;
            ret = -1;
            seaf_warning ("[copy files] File %s not Found.\n", name);
            goto out;
        }
        if (S_ISDIR(src_dents[i]->mode))
            total_files = seaf_fs_manager_count_fs_files (seaf->fs_mgr,
                                                          src_repo->store_id,
                                                          src_repo->version,
                                                          src_dents[i]->id);
        else
            total_files = 1;
        if (total_files < 0) {
            err_str = COPY_ERR_INTERNAL;
            seaf_warning ("Failed to get file count.\n");
            ret = -1;
            goto out;
        }
        total_files_all += total_files;
        /* Enforces max_files/max_size limits and accumulates total size. */
        if (!check_file_count_and_size (src_repo, src_dents[i], total_files_all,
                                        &total_size_all, &err_str)) {
            ret = -1;
            goto out;
        }
        i++;
    }

    check_quota_ret = seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr,
                                                                 dst_repo_id,
                                                                 total_size_all);
    if (check_quota_ret != 0) {
        if (check_quota_ret == -1) {
            err_str = COPY_ERR_INTERNAL;
            seaf_warning ("Failed to check quota.\n");
        } else {
            err_str = COPY_ERR_QUOTA_IS_FULL;
        }
        ret = -1;
        goto out;
    }

    if (task)
        task->total = total_files_all;

    i = 0;
    /* do copy */
    for (ptr = dst_names; ptr; ptr = ptr->next) {
        name = ptr->data;
        new_id = copy_recursive (src_repo, dst_repo, src_crypt, dst_crypt,
                                 src_dents[i]->id, src_dents[i]->mode,
                                 modifier, task, &new_size);
        if (!new_id) {
            err_str = COPY_ERR_INTERNAL;
            ret = -1;
            seaf_warning ("[copy files] Failed to copy file %s.\n",
                          src_dents[i]->name);
            goto out;
        }
        dst_dents[i] = seaf_dirent_new (dir_version_from_repo_version(dst_repo->version),
                                        new_id, src_dents[i]->mode, name,
                                        src_dents[i]->mtime, modifier, new_size);
        g_free (new_id);
        i++;
    }

    if (put_dirent_and_commit (dst_repo, dst_path, dst_dents, file_num,
                               replace, modifier, TRUE, gc_id, NULL) < 0) {
        err_str = COPY_ERR_INTERNAL;
        ret = -1;
        goto out;
    }

    if (task)
        task->successful = TRUE;

    seaf_repo_manager_merge_virtual_repo (seaf->repo_mgr, dst_repo_id, NULL);

out:
    if (src_repo)
        seaf_repo_unref (src_repo);
    if (dst_repo)
        seaf_repo_unref (dst_repo);
    g_free (src_crypt);
    g_free (dst_crypt);
    if (dirent_hash)
        g_hash_table_unref(dirent_hash);
    /* src_dents[] entries are owned by dirent_hash; only the array is freed. */
    g_free(src_dents);
    for (i = 0; i < file_num; i++)
        seaf_dirent_free (dst_dents[i]);
    g_free (dst_dents);
    if (src_names)
        string_list_free (src_names);
    if (dst_names)
        string_list_free (dst_names);
    g_free (gc_id);

    if (ret == 0) {
        update_repo_size
(dst_repo_id); } else { if (task && !task->canceled) { task->failed = TRUE; set_failed_reason (&(task->failed_reason), err_str); } } return ret; } static gboolean is_virtual_repo_and_origin (SeafRepo *repo1, SeafRepo *repo2) { if (repo1->virtual_info && strcmp (repo1->virtual_info->origin_repo_id, repo2->id) == 0) return TRUE; if (repo2->virtual_info && strcmp (repo2->virtual_info->origin_repo_id, repo1->id) == 0) return TRUE; return FALSE; } static gboolean check_file_count_and_size (SeafRepo *repo, SeafDirent *dent, gint64 total_files, gint64 *total_size_all, char **err_str) { gint64 total_file_size = 0; gint64 size = -1; if (seaf->copy_mgr->max_files > 0 && total_files > seaf->copy_mgr->max_files) { *err_str = COPY_ERR_TOO_MANY_FILES; seaf_warning("Failed to copy/move file from repo %.8s: Too many files\n", repo->id); return FALSE; } if (S_ISREG(dent->mode)) { if (repo->version > 0) size = dent->size; else size = seaf_fs_manager_get_file_size (seaf->fs_mgr, repo->store_id, repo->version, dent->id); } else { size = seaf_fs_manager_get_fs_size (seaf->fs_mgr, repo->store_id, repo->version, dent->id); } if (size < 0) { *err_str = COPY_ERR_INTERNAL; seaf_warning ("Failed to get dir size of %s:%s.\n", repo->store_id, dent->id); return FALSE; } if (total_size_all) { *total_size_all += size; total_file_size = *total_size_all; } if (seaf->copy_mgr->max_size > 0) { if (total_file_size > seaf->copy_mgr->max_size) { *err_str = COPY_ERR_SIZE_TOO_LARGE; seaf_warning("Failed to copy/move file from repo %.8s: " "Folder or file size is too large.\n", repo->id); return FALSE; } } return TRUE; } /** * Copy a SeafDirent from a SeafDir to another. * * 1. When @src_repo and @dst_repo are not the same repo, neither of them * should be encrypted. * * 2. the file being copied must not exist in the dst path of the dst repo. 
 */
SeafileCopyResult *
seaf_repo_manager_copy_file (SeafRepoManager *mgr,
                             const char *src_repo_id,
                             const char *src_path,
                             const char *src_filename,
                             const char *dst_repo_id,
                             const char *dst_path,
                             const char *dst_filename,
                             const char *user,
                             int need_progress,
                             int synchronous,
                             GError **error)
{
    SeafRepo *src_repo = NULL, *dst_repo = NULL;
    SeafDirent *src_dent = NULL, *dst_dent = NULL;
    char *src_canon_path = NULL, *dst_canon_path = NULL;
    SeafCommit *dst_head_commit = NULL;
    int ret = 0;
    gboolean background = FALSE;
    char *task_id = NULL;
    SeafileCopyResult *res= NULL;

    GET_REPO_OR_FAIL(src_repo, src_repo_id);

    if (strcmp(src_repo_id, dst_repo_id) != 0) {
        GET_REPO_OR_FAIL(dst_repo, dst_repo_id);
        /* Cross-repo copy of encrypted repos is rejected in this API. */
        if (src_repo->encrypted || dst_repo->encrypted) {
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                         "Can't copy files between encrypted repo(s)");
            ret = -1;
            goto out;
        }
    } else {
        seaf_repo_ref (src_repo);
        dst_repo = src_repo;
    }

    src_canon_path = get_canonical_path (src_path);
    dst_canon_path = get_canonical_path (dst_path);

    GET_COMMIT_OR_FAIL(dst_head_commit, dst_repo->id, dst_repo->version,
                       dst_repo->head->commit_id);
    /* FAIL_IF_FILE_EXISTS(dst_repo->store_id, dst_repo->version, dst_head_commit->root_id, dst_canon_path, dst_filename, NULL); */

    if (strcmp (src_repo_id, dst_repo_id) == 0 ||
        is_virtual_repo_and_origin (src_repo, dst_repo)) {
        /* Same repo (or virtual/origin pair): no block copying needed, just
         * reference the same object under the new name. */
        /* get src dirent */
        src_dent = get_dirent_by_path (src_repo, NULL, src_canon_path,
                                       src_filename, error);
        if (!src_dent) {
            seaf_warning("[copy file] file %s/%s doesn't exist.\n",
                         src_canon_path, src_filename);
            ret = -1;
            goto out;
        }

        gint64 file_size = (src_dent->version > 0) ? src_dent->size : -1;

        /* duplicate src dirent with new name */
        dst_dent = seaf_dirent_new (dir_version_from_repo_version(dst_repo->version),
                                    src_dent->id, src_dent->mode, dst_filename,
                                    src_dent->mtime, user, file_size);

        if (put_dirent_and_commit (dst_repo, dst_canon_path, &dst_dent, 1, 0,
                                   user, FALSE, NULL, error) < 0) {
            /* NOTE(review): `!error` tests the GError** itself — when the
             * caller passed NULL, g_set_error() below is a no-op anyway;
             * confirm `!(*error)` was not intended. */
            if (!error)
                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                             "failed to put dirent");
            ret = -1;
            goto out;
        }

        seaf_repo_manager_merge_virtual_repo (mgr, dst_repo_id, NULL);

        update_repo_size (dst_repo_id);
    } else if (!synchronous) {
        /* Cross-repo: hand off to a background copy task. */
        background = TRUE;
        task_id = seaf_copy_manager_add_task (seaf->copy_mgr,
                                              src_repo_id, src_canon_path,
                                              src_filename,
                                              dst_repo_id, dst_canon_path,
                                              dst_filename,
                                              0, user, cross_repo_copy,
                                              need_progress);
        if (need_progress && !task_id) {
            seaf_warning ("Failed to start copy task.\n");
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                         "failed to start copy task");
            ret = -1;
            goto out;
        }
    } else {
        /* Synchronous for cross-repo copy */
        if (cross_repo_copy (src_repo_id, src_canon_path, src_filename,
                             dst_repo_id, dst_canon_path, dst_filename,
                             0, user, NULL) < 0) {
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                         "Failed to move");
            ret = -1;
            goto out;
        }
    }

out:
    if (src_repo)
        seaf_repo_unref (src_repo);
    if (dst_repo)
        seaf_repo_unref (dst_repo);
    if (dst_head_commit)
        seaf_commit_unref(dst_head_commit);
    if (src_canon_path)
        g_free (src_canon_path);
    if (dst_canon_path)
        g_free (dst_canon_path);
    if (src_dent)
        seaf_dirent_free(src_dent);
    if (dst_dent)
        seaf_dirent_free(dst_dent);

    if (ret == 0) {
        res = seafile_copy_result_new ();
        /* g_object_set copies the string, so task_id is freed here. */
        g_object_set (res, "background", background, "task_id", task_id, NULL);
        g_free (task_id);
    }

    return res;
}

static gboolean
check_move (SeafRepo *src_repo, SeafRepo *dst_repo,
            const char *src_path, const char *dst_path,
            GList *src_names);

/* Multi-file variant of the copy API.  (Signature continues on the next
 * chunk.) */
SeafileCopyResult *
seaf_repo_manager_copy_multiple_files (SeafRepoManager *mgr,
                                       const char *src_repo_id,
                                       const char *src_path,
                                       const char *src_filenames,
                                       const char
*dst_repo_id,
                                       const char *dst_path,
                                       const char *dst_filenames,
                                       const char *user,
                                       int need_progress,
                                       int synchronous,
                                       GError **error)
{
    SeafRepo *src_repo = NULL, *dst_repo = NULL;
    SeafDirent **src_dents = NULL, **dst_dents = NULL;
    char *src_canon_path = NULL, *dst_canon_path = NULL;
    SeafCommit *dst_head_commit = NULL;
    int i = 0, ret = 0;
    int file_num = 1;
    gint64 *file_sizes = NULL;
    gboolean background = FALSE;
    char *task_id = NULL;
    char *name;
    GList *src_names = NULL, *dst_names = NULL, *ptr;
    SeafileCopyResult *res = NULL;
    GHashTable *dirent_hash = NULL;

    GET_REPO_OR_FAIL(src_repo, src_repo_id);

    if (strcmp(src_repo_id, dst_repo_id) != 0) {
        GET_REPO_OR_FAIL(dst_repo, dst_repo_id);
    } else {
        seaf_repo_ref (src_repo);
        dst_repo = src_repo;
    }

    src_canon_path = get_canonical_path (src_path);
    dst_canon_path = get_canonical_path (dst_path);

    GET_COMMIT_OR_FAIL(dst_head_commit, dst_repo->id, dst_repo->version,
                       dst_repo->head->commit_id);
    /*FAIL_IF_FILE_EXISTS(dst_repo->store_id, dst_repo->version, dst_head_commit->root_id, dst_canon_path, dst_filename, NULL);*/

    /* Both name arguments are JSON arrays; they must pair up one-to-one. */
    src_names = json_to_file_list (src_filenames);
    dst_names = json_to_file_list (dst_filenames);
    if (!src_names || !dst_names) {
        ret = -1;
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Load filenames to json failed");
        goto out;
    }
    file_num = g_list_length (src_names);
    int dst_file_num = g_list_length (dst_names);
    if (dst_file_num != file_num) {
        ret = -1;
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "The number of files in the parameters does not match");
        goto out;
    }

    /* copy file within the same repo */
    if (src_repo == dst_repo || is_virtual_repo_and_origin (src_repo, dst_repo)) {
        /* Reject copying a directory into its own subdirectory. */
        if (!check_move (src_repo, dst_repo, src_path, dst_path, src_names)) {
            seaf_warning ("Can not copy directory to its subdirectory");
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                         "Can not copy directory to its subdirectory");
            ret = -1;
            goto out;
        }

        /* get src dirents */
        src_dents = g_new0 (SeafDirent *, file_num);
        file_sizes = g_new0 (gint64, file_num);

        /* src_dents entries are borrowed from dirent_hash (freed with it). */
        dirent_hash = get_sub_dirents_hash_map (src_repo, src_path);
        if (!dirent_hash) {
            ret = -1;
            goto out;
        }

        for (ptr = src_names; ptr; ptr = ptr->next) {
            name = ptr->data;
            if (strcmp(name, "") == 0) {
                ret = -1;
                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                             "Empty src_filenames");
                goto out;
            }
            src_dents[i] = g_hash_table_lookup(dirent_hash, name);
            if (!src_dents[i]) {
                ret = -1;
                seaf_warning ("[copy files] File %s not Found.\n", name);
                goto out;
            }
            file_sizes[i] = (src_dents[i]->version > 0) ? src_dents[i]->size : -1;
            i++;
        }

        dst_dents = g_new0 (SeafDirent *, file_num);
        i = 0;
        for (ptr = dst_names; ptr; ptr = ptr->next) {
            name = ptr->data;
            if (strcmp(name, "") == 0) {
                ret = -1;
                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                             "Empty dst_filenames");
                goto out;
            }
            /* duplicate src dirents with new names */
            dst_dents[i] = seaf_dirent_new (dir_version_from_repo_version (dst_repo->version),
                                            src_dents[i]->id, src_dents[i]->mode,
                                            name, src_dents[i]->mtime, user,
                                            file_sizes[i]);
            i++;
        }

        if (put_dirent_and_commit (dst_repo, dst_canon_path, dst_dents,
                                   file_num, 0, user, FALSE, NULL, error) < 0) {
            /* NOTE(review): `!error` tests the GError** itself — confirm
             * `!(*error)` was not intended (same quirk as the single-file
             * copy path). */
            if (!error)
                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                             "failed to put dirents");
            ret = -1;
            goto out;
        }

        /* NOTE(review): the single-file copy path merges dst_repo_id here;
         * confirm src_repo_id is intended on this path. */
        seaf_repo_manager_merge_virtual_repo (mgr, src_repo_id, NULL);

        update_repo_size (dst_repo_id);
    } else {
        /* copy between different repos */
        if (!synchronous) {
            background = TRUE;
            task_id = seaf_copy_manager_add_task (seaf->copy_mgr,
                                                  src_repo_id, src_canon_path,
                                                  src_filenames,
                                                  dst_repo_id, dst_canon_path,
                                                  dst_filenames,
                                                  0, user, cross_repo_copy,
                                                  need_progress);
            if (need_progress && !task_id) {
                seaf_warning ("Failed to start copy task.\n");
                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                             "failed to start copy task");
                ret = -1;
                goto out;
            }
        } else {
            /* Synchronous for cross-repo copy */
            if (cross_repo_copy (src_repo_id, src_canon_path, src_filenames,
                                 dst_repo_id, dst_canon_path, dst_filenames,
                                 0, user, NULL) < 0) {
                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                             "Failed to move");
                ret = -1;
                goto out;
            }
        } // Synchronous copy
    } //else diffrent repo

out:
    if (src_repo)
        seaf_repo_unref (src_repo);
    if (dst_repo)
        seaf_repo_unref (dst_repo);
    if (dst_head_commit)
        seaf_commit_unref(dst_head_commit);
    if (src_canon_path)
        g_free (src_canon_path);
    if (dst_canon_path)
        g_free (dst_canon_path);
    if (src_names)
        string_list_free (src_names);
    if (dst_names)
        string_list_free (dst_names);
    if (file_sizes)
        g_free (file_sizes);
    /* src_dents[] entries are owned by dirent_hash; only the array is freed. */
    if (src_dents)
        g_free (src_dents);
    if (dst_dents) {
        for (i = 0; i < file_num; i++)
            seaf_dirent_free (dst_dents[i]);
        g_free (dst_dents);
    }
    if (dirent_hash)
        g_hash_table_unref(dirent_hash);

    if (ret == 0) {
        res = seafile_copy_result_new ();
        g_object_set (res, "background", background, "task_id", task_id, NULL);
        g_free (task_id);
    }

    return res;
}

/*
 * Move @file_num dirents within one repo: insert dst_dents under @dst_path,
 * then delete the originals from @src_path, all in a single new commit.
 * Returns 0 on success, -1 on failure.  (Function continues past this chunk.)
 */
static int
move_file_same_repo (const char *repo_id,
                     const char *src_filenames, const char *src_path,
                     SeafDirent *src_dents[],
                     const char *dst_path, SeafDirent *dst_dents[],
                     int file_num, int replace, const char *user,
                     GError **error)
{
    SeafRepo *repo = NULL;
    SeafCommit *head_commit = NULL;
    char *root_id_after_put = NULL, *root_id = NULL;
    char buf[SEAF_PATH_MAX];
    int ret = 0, i = 0;

    GET_REPO_OR_FAIL(repo, repo_id);
    GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version,
                       repo->head->commit_id);

    root_id_after_put = head_commit->root_id;

    /* dent_list only borrows the dirents; it is freed shallowly below. */
    GList *dent_list = NULL;
    GList *name_list = NULL;
    for (i = 0; i < file_num; i++) {
        dent_list = g_list_append (dent_list, dst_dents[i]);
    }

    if (*dst_path == '/')
        dst_path = dst_path + 1;

    root_id_after_put = post_multi_files_recursive (repo, head_commit->root_id,
                                                    dst_path, dent_list, user,
                                                    replace, &name_list);
    g_list_free (dent_list);
    g_list_free_full (name_list, (GDestroyNotify)g_free);

    if (!root_id_after_put) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "move file failed");
        ret = -1;
        goto out;
    }

    /* Now remove the source names from the tree that already contains the
     * destination entries. */
    root_id = do_del_file (repo, root_id_after_put, src_path, src_filenames,
                           NULL, NULL, NULL);
    if (!root_id) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "move file failed");
        ret = -1;
        goto out;
    }

    /* Commit. */
    if (file_num > 1) {
        snprintf(buf, SEAF_PATH_MAX, "Moved \"%s\" and %d more files",
                 src_dents[0]->name,file_num - 1);
    } else if (S_ISDIR(src_dents[0]->mode)) {
        snprintf(buf, SEAF_PATH_MAX, "Moved directory \"%s\"", src_dents[0]->name);
    } else {
        snprintf(buf, SEAF_PATH_MAX, "Moved \"%s\"", src_dents[0]->name);
    }

    if (gen_new_commit (repo_id, head_commit, root_id, user, buf,
                        NULL, TRUE, FALSE, NULL, error) < 0)
        ret = -1;

out:
    if (repo)
        seaf_repo_unref (repo);
    if (head_commit)
        seaf_commit_unref (head_commit);
    g_free (root_id_after_put);
    g_free (root_id);
    return ret;
}

/*
 * Worker for cross-repo move (also used synchronously): copies the files
 * named by the JSON list @src_filename into the destination repo, commits,
 * then deletes them from the source repo.  Mirrors cross_repo_copy() plus
 * the deletion step.  Returns 0 on success, -1 on failure (recording the
 * reason on @task).
 */
static int
cross_repo_move (const char *src_repo_id, const char *src_path,
                 const char *src_filename, const char *dst_repo_id,
                 const char *dst_path, const char *dst_filename,
                 int replace, const char *modifier, CopyTask *task)
{
    SeafRepo *src_repo = NULL, *dst_repo = NULL;
    SeafDirent **src_dents = NULL, **dst_dents = NULL;
    GList *src_names = NULL, *dst_names = NULL, *ptr;
    char *name;
    char *new_id = NULL;
    guint64 new_size = 0;
    int ret = 0, i = 0;
    int file_num = 0;
    GHashTable *dirent_hash = NULL;
    gint64 total_size_all = 0;
    char *err_str = COPY_ERR_INTERNAL;
    int check_quota_ret;
    SeafileCrypt *src_crypt = NULL;
    SeafileCrypt *dst_crypt = NULL;
    char *gc_id = NULL;

    src_repo = seaf_repo_manager_get_repo (seaf->repo_mgr, src_repo_id);
    if (!src_repo) {
        err_str = COPY_ERR_INTERNAL;
        ret = -1;
        seaf_warning ("Failed to get source repo.\n");
        goto out;
    }

    /* Encrypted repos need a previously-supplied password to build a crypt. */
    if (src_repo->encrypted) {
        src_crypt = get_crypt_by_repo (src_repo, modifier);
        if (!src_crypt) {
            err_str = COPY_ERR_INTERNAL;
            ret = -1;
            seaf_warning ("The source repo is encrypted. Please provide password to view it.\n");
            goto out;
        }
    }

    dst_repo = seaf_repo_manager_get_repo (seaf->repo_mgr, dst_repo_id);
    if (!dst_repo) {
        err_str = COPY_ERR_INTERNAL;
        ret = -1;
        seaf_warning ("Failed to get destination repo.\n");
        goto out;
    }

    if (dst_repo->encrypted) {
        dst_crypt = get_crypt_by_repo (dst_repo, modifier);
        if (!dst_crypt) {
            err_str = COPY_ERR_INTERNAL;
            ret = -1;
            seaf_warning ("The destination repo is encrypted. Please provide password to view it.\n");
            goto out;
        }
    }

    src_names = json_to_file_list (src_filename);
    dst_names = json_to_file_list (dst_filename);
    /* Remember the GC id before copying; committing later verifies no GC
     * ran in between (check_gc=TRUE in put_dirent_and_commit). */
    gc_id = seaf_repo_get_current_gc_id (dst_repo);

    file_num = g_list_length (src_names);

    src_dents = g_new0 (SeafDirent *, file_num);
    dst_dents = g_new0 (SeafDirent *, file_num);

    /* src_dents entries are borrowed from dirent_hash (freed with it). */
    dirent_hash = get_sub_dirents_hash_map (src_repo, src_path);
    if (!dirent_hash) {
        err_str = COPY_ERR_INTERNAL;
        ret = -1;
        goto out;
    }

    gint64 total_files = -1;
    gint64 total_files_all = 0;
    /* check filename, size and file count */
    for (ptr = src_names; ptr; ptr = ptr->next) {
        name = ptr->data;
        if (strcmp(name, "") == 0) {
            err_str = COPY_ERR_BAD_ARG;
            ret = -1;
            seaf_warning ("[move files] Bad args: Empty src_filename.\n");
            goto out;
        }
        src_dents[i] = g_hash_table_lookup (dirent_hash, name);
        if (!src_dents[i]) {
            err_str = COPY_ERR_INTERNAL;
            ret = -1;
            seaf_warning ("[move files] File %s not Found.\n", name);
            goto out;
        }
        if (S_ISDIR(src_dents[i]->mode))
            total_files = seaf_fs_manager_count_fs_files (seaf->fs_mgr,
                                                          src_repo->store_id,
                                                          src_repo->version,
                                                          src_dents[i]->id);
        else
            total_files = 1;
        if (total_files < 0) {
            err_str = COPY_ERR_INTERNAL;
            seaf_warning ("Failed to get file count.\n");
            ret = -1;
            goto out;
        }
        total_files_all += total_files;
        /* Enforces max_files/max_size limits and accumulates total size. */
        if (!check_file_count_and_size (src_repo, src_dents[i], total_files_all,
                                        &total_size_all, &err_str)) {
            ret = -1;
            goto out;
        }
        i++;
    }

    check_quota_ret = seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr,
                                                                 dst_repo_id,
                                                                 total_size_all);
    if (check_quota_ret != 0) {
        if (check_quota_ret == -1) {
            err_str = COPY_ERR_INTERNAL;
            seaf_warning ("Failed to check quota.\n");
        } else {
            err_str = COPY_ERR_QUOTA_IS_FULL;
        }
        ret = -1;
        goto out;
    }

    if (task)
        task->total = total_files_all;

    i = 0;
    /* do copy */
    for (ptr = dst_names; ptr; ptr = ptr->next) {
        name = ptr->data;
        new_id = copy_recursive (src_repo, dst_repo, src_crypt, dst_crypt,
                                 src_dents[i]->id, src_dents[i]->mode,
                                 modifier, task, &new_size);
        if (!new_id) {
            err_str = COPY_ERR_INTERNAL;
            ret = -1;
            seaf_warning ("[move files] Failed to copy file %s.\n",
                          src_dents[i]->name);
            goto out;
        }
        dst_dents[i] = seaf_dirent_new (dir_version_from_repo_version(dst_repo->version),
                                        new_id, src_dents[i]->mode, name,
                                        src_dents[i]->mtime, modifier, new_size);
        g_free (new_id);
        i++;
    }

    if (put_dirent_and_commit (dst_repo, dst_path, dst_dents, file_num,
                               replace, modifier, TRUE, gc_id, NULL) < 0) {
        err_str = COPY_ERR_INTERNAL;
        ret = -1;
        goto out;
    }

    seaf_repo_manager_merge_virtual_repo (seaf->repo_mgr, dst_repo_id, NULL);

    /* Move = copy + delete: remove the originals from the source repo. */
    if (seaf_repo_manager_del_file (seaf->repo_mgr, src_repo_id, src_path,
                                    src_filename, modifier, NULL) < 0) {
        err_str = COPY_ERR_INTERNAL;
        ret = -1;
        goto out;
    }

    if (task)
        task->successful = TRUE;

    seaf_repo_manager_merge_virtual_repo (seaf->repo_mgr, src_repo_id, NULL);

out:
    if (src_repo)
        seaf_repo_unref (src_repo);
    if (dst_repo)
        seaf_repo_unref (dst_repo);
    g_free (src_crypt);
    g_free (dst_crypt);
    if (dirent_hash)
        g_hash_table_unref(dirent_hash);
    /* src_dents[] entries are owned by dirent_hash; only the array is freed. */
    g_free (src_dents);
    for (i = 0; i < file_num; i++)
        seaf_dirent_free(dst_dents[i]);
    g_free (dst_dents);
    if (src_names)
        string_list_free (src_names);
    if (dst_names)
        string_list_free (dst_names);
    g_free (gc_id);

    if (ret == 0) {
        update_repo_size (dst_repo_id);
    } else {
        /* Record the failure reason unless the task was user-canceled. */
        if (task && !task->canceled) {
            task->failed = TRUE;
            set_failed_reason (&(task->failed_reason), err_str);
        }
    }

    return ret;
}

/*
 * Return FALSE if any source entry's path is a prefix of the destination
 * path — i.e. the move/copy would place a directory inside itself.
 * Virtual-repo paths are translated to origin-repo paths before comparing.
 */
static gboolean
check_move (SeafRepo *src_repo, SeafRepo *dst_repo,
            const char *src_path, const char *dst_path,
            GList *src_names)
{
    char *dst_dirent_path = NULL;
    int len;
    gboolean ret = TRUE;

    if (dst_repo->virtual_info) {
        dst_dirent_path = g_build_path ("/", dst_repo->virtual_info->path,
                                        dst_path, NULL);
    } else {
        dst_dirent_path = g_strdup (dst_path);
    }

    GList *ptr;
    char *src_dirent_path = NULL;
    char *name;
    for (ptr = src_names; ptr; ptr = ptr->next) {
        name = ptr->data;
        /* The trailing "/" element ensures a prefix match only at a path
         * component boundary. */
        if (src_repo->virtual_info) {
            src_dirent_path = g_build_path ("/", src_repo->virtual_info->path,
                                            src_path, name, "/", NULL);
        } else {
            src_dirent_path = g_build_path ("/", src_path, name, "/", NULL);
        }
        len = strlen(src_dirent_path);
        if (strncmp (dst_dirent_path, src_dirent_path, len) == 0) {
            g_free (src_dirent_path);
            ret = FALSE;
            goto out;
        }
        g_free (src_dirent_path);
    }

out:
    g_free (dst_dirent_path);
    return ret;
}

/* Multi-file move API.  (Function continues past this chunk.) */
SeafileCopyResult *
seaf_repo_manager_move_multiple_files (SeafRepoManager *mgr,
                                       const char *src_repo_id,
                                       const char *src_path,
                                       const char *src_filenames,
                                       const char *dst_repo_id,
                                       const char *dst_path,
                                       const char *dst_filenames,
                                       int replace,
                                       const char *user,
                                       int need_progress,
                                       int synchronous,
                                       GError **error)
{
    SeafRepo *src_repo = NULL, *dst_repo = NULL;
    SeafDirent **src_dents = NULL, **dst_dents = NULL;
    char *src_canon_path = NULL, *dst_canon_path = NULL;
    SeafCommit *dst_head_commit = NULL;
    int i = 0, ret = 0;
    int file_num = 1;
    gint64 *file_sizes = NULL;
    gboolean background = FALSE;
    char *task_id = NULL;
    char *name;
    GList *src_names = NULL, *dst_names = NULL, *ptr;
    SeafileCopyResult *res = NULL;
    GHashTable *dirent_hash = NULL;

    GET_REPO_OR_FAIL(src_repo, src_repo_id);

    if (strcmp(src_repo_id, dst_repo_id) != 0) {
        GET_REPO_OR_FAIL(dst_repo, dst_repo_id);
    } else {
        seaf_repo_ref (src_repo);
        dst_repo = src_repo;
    }

    src_canon_path = get_canonical_path (src_path);
    dst_canon_path = get_canonical_path (dst_path);

    GET_COMMIT_OR_FAIL(dst_head_commit, dst_repo->id, dst_repo->version,
                       dst_repo->head->commit_id);
    /*FAIL_IF_FILE_EXISTS(dst_repo->store_id, dst_repo->version, dst_head_commit->root_id, dst_canon_path, dst_filename, NULL);*/

    src_names = json_to_file_list (src_filenames);
    dst_names =
json_to_file_list (dst_filenames);
if (!src_names || !dst_names) { ret = -1; g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Load filenames to json failed"); goto out; }
/* src and dst name lists must pair up one-to-one */
file_num = g_list_length (src_names);
int dst_file_num = g_list_length (dst_names);
if (dst_file_num != file_num) { ret = -1; g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "The number of files in the parameters does not match"); goto out; }
gboolean is_virtual_origin = is_virtual_repo_and_origin (src_repo, dst_repo);
if (src_repo == dst_repo || is_virtual_origin) {
/* get src dirents */
/* NOTE(review): this warning message lacks a trailing \n, unlike the rest of the file */
if (!check_move (src_repo, dst_repo, src_path, dst_path, src_names)) { seaf_warning ("Can not move copy directory to its subdirectory"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Can not move directory to its subdirectory"); ret = -1; goto out; }
src_dents = g_new0 (SeafDirent *, file_num);
file_sizes = g_new0 (gint64, file_num);
/* dirent_hash owns the looked-up dirents; they are only borrowed below */
dirent_hash = get_sub_dirents_hash_map (src_repo, src_path);
if (!dirent_hash) { ret = -1; goto out; }
for (ptr = src_names; ptr; ptr = ptr->next) { name = ptr->data;
if (strcmp(name, "") == 0) { ret = -1; g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Empty src_filenames"); goto out; }
src_dents[i] = g_hash_table_lookup(dirent_hash, name);
if (!src_dents[i]) { ret = -1; seaf_warning ("[move files] File %s not Found.\n", name); goto out; }
/* pre-version-1 dirents carry no size; -1 means "unknown" */
file_sizes[i] = (src_dents[i]->version > 0) ? src_dents[i]->size : -1;
i++; }
dst_dents = g_new0 (SeafDirent *, file_num);
i = 0;
for (ptr = dst_names; ptr; ptr = ptr->next) { name = ptr->data;
if (strcmp(name, "") == 0) { ret = -1; g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Empty dst_filenames"); goto out; }
/* duplicate src dirents with new names */
dst_dents[i] = seaf_dirent_new (dir_version_from_repo_version (dst_repo->version), src_dents[i]->id, src_dents[i]->mode, name, src_dents[i]->mtime, user, file_sizes[i]);
i++; }
/* move file within the same repo */
if (src_repo == dst_repo) {
if (move_file_same_repo (src_repo_id, src_filenames, src_canon_path, src_dents, dst_canon_path, dst_dents, file_num, replace, user, error) < 0) { ret = -1; goto out; }
} else {
/* move between virtual and origin repo */
if (put_dirent_and_commit (dst_repo, dst_path, dst_dents, file_num, replace, user, FALSE, NULL, NULL) < 0) { ret = -1; goto out; }
seaf_repo_manager_merge_virtual_repo (mgr, dst_repo->id, NULL);
if (seaf_repo_manager_del_file (mgr, src_repo->id, src_path, src_filenames, user, error) < 0) { ret = -1; goto out; } }
seaf_repo_manager_merge_virtual_repo (mgr, src_repo_id, NULL);
update_repo_size (dst_repo_id);
} else {
/* move between different repos */
if (!synchronous) { background = TRUE;
task_id = seaf_copy_manager_add_task (seaf->copy_mgr, src_repo_id, src_canon_path, src_filenames, dst_repo_id, dst_canon_path, dst_filenames, 0, user, cross_repo_move, need_progress);
if (need_progress && !task_id) { seaf_warning ("Failed to start copy task.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "failed to start copy task"); ret = -1; goto out; }
} else {
/* Synchronous for cross-repo move */
if (cross_repo_move (src_repo_id, src_canon_path, src_filenames, dst_repo_id, dst_canon_path, dst_filenames, replace, user, NULL) < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to move"); ret = -1; goto out; } } } // Synchronous move
} // else: different repos
out:
if (src_repo)
seaf_repo_unref (src_repo);
if (dst_repo) seaf_repo_unref (dst_repo);
if (dst_head_commit) seaf_commit_unref(dst_head_commit);
if (src_canon_path) g_free (src_canon_path);
if (dst_canon_path) g_free (dst_canon_path);
if (src_names) string_list_free (src_names);
if (dst_names) string_list_free (dst_names);
if (file_sizes) g_free (file_sizes);
if (dirent_hash) g_hash_table_unref(dirent_hash);
/* src_dents entries were owned by dirent_hash; only the array is freed */
if (src_dents) g_free (src_dents);
if (dst_dents) { for (i = 0; i < file_num; i++) seaf_dirent_free (dst_dents[i]); g_free (dst_dents); }
/* only build a result object on success; task_id is NULL for inline moves */
if (ret == 0) { res = seafile_copy_result_new (); g_object_set (res, "background", background, "task_id", task_id, NULL); g_free (task_id); }
return res; }

/* Create new_dir_path (a path relative to parent_dir; leading '/' or '\\'
 * rejected) under parent_dir, creating every missing intermediate directory,
 * then commit once and merge virtual repos.  Returns 0 on success, -1 on
 * error with *error set. */
int seaf_repo_manager_mkdir_with_parents (SeafRepoManager *mgr, const char *repo_id, const char *parent_dir, const char *new_dir_path, const char *user, GError **error) { SeafRepo *repo = NULL; SeafCommit *head_commit = NULL; char **sub_folders = NULL; int nfolder; char buf[SEAF_PATH_MAX]; char *root_id = NULL; SeafDirent *new_dent = NULL; char *parent_dir_can = NULL; char *relative_dir_can = NULL; char *abs_path = NULL; int total_path_len; int sub_folder_len; GList *uncre_dir_list = NULL; GList *iter_list = NULL; char *uncre_dir; int ret = 0;
if (new_dir_path[0] == '/' || new_dir_path[0] == '\\') { seaf_warning ("[mkdir with parent] Invalid relative path %s.\n", new_dir_path); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid relative path"); return -1; }
GET_REPO_OR_FAIL(repo, repo_id);
GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);
relative_dir_can = get_canonical_path (new_dir_path);
sub_folders = g_strsplit (relative_dir_can, "/", 0);
nfolder = g_strv_length (sub_folders);
/* validate every path component before touching the fs tree */
int i = 0;
for (; i < nfolder; ++i) { if (strcmp (sub_folders[i], "") == 0) continue;
if (should_ignore_file (sub_folders[i], NULL)) { seaf_warning ("[post dir] Invalid dir name %s.\n", sub_folders[i]); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid dir name"); ret = -1; goto out; } }
if (strcmp (parent_dir, "/") == 0 || strcmp (parent_dir, "\\") == 0) { parent_dir_can = g_strdup ("/"); abs_path = g_strdup_printf ("%s%s", parent_dir_can, relative_dir_can); } else { parent_dir_can = get_canonical_path (parent_dir); abs_path = g_strdup_printf ("%s/%s", parent_dir_can, relative_dir_can); }
if (!abs_path) { seaf_warning ("[mkdir with parent] Out of memory.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, "Out of memory"); ret = -1; goto out; }
total_path_len = strlen (abs_path);
// from the last, to check the folder exist
i = nfolder - 1;
for (; i >= 0; --i) { if (strcmp (sub_folders[i], "") == 0) continue;
/* truncate abs_path component by component (also removes the '/') */
sub_folder_len = strlen (sub_folders[i]) + 1;
total_path_len -= sub_folder_len;
memset (abs_path + total_path_len, '\0', sub_folder_len);
if (check_file_exists (repo->store_id, repo->version, head_commit->root_id, abs_path, sub_folders[i], NULL)) {
// folder exist, skip loop to create unexist subfolder
strcat (abs_path, "/"); strcat (abs_path, sub_folders[i]); break;
} else {
// folder not exist, cache it to create later
uncre_dir_list = g_list_prepend (uncre_dir_list, sub_folders[i]); } }
if (uncre_dir_list) {
// exist parent folder has been found, based on it to create unexist subfolder
char new_root_id[41];
memcpy (new_root_id, head_commit->root_id, 40); new_root_id[40] = '\0';
for (iter_list = uncre_dir_list; iter_list; iter_list = iter_list->next) { uncre_dir = iter_list->data;
new_dent = seaf_dirent_new (dir_version_from_repo_version(repo->version), EMPTY_SHA1, S_IFDIR, uncre_dir, (gint64)time(NULL), NULL, -1);
root_id = do_post_file (repo, new_root_id, abs_path, new_dent);
if (!root_id) { seaf_warning ("[put dir] Failed to put dir.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to put dir"); ret = -1; seaf_dirent_free (new_dent); goto out; }
// the last folder has been created
if (!iter_list->next) { seaf_dirent_free (new_dent); break; }
strcat (abs_path, "/"); strcat
(abs_path, uncre_dir);
memcpy (new_root_id, root_id, 40);
seaf_dirent_free (new_dent);
g_free (root_id); }
/* Commit. */
snprintf(buf, SEAF_PATH_MAX, "Added directory \"%s\"", relative_dir_can);
if (gen_new_commit (repo_id, head_commit, root_id, user, buf, NULL, TRUE, FALSE, NULL, error) < 0) { ret = -1; g_free (root_id); goto out; }
seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL);
g_free (root_id); }
out:
if (repo) seaf_repo_unref (repo);
if (head_commit) seaf_commit_unref(head_commit);
if (sub_folders) g_strfreev (sub_folders);
/* list elements point into sub_folders, so only the list cells are freed */
if (uncre_dir_list) g_list_free (uncre_dir_list);
if (relative_dir_can) g_free (relative_dir_can);
if (parent_dir_can) g_free (parent_dir_can);
if (abs_path) g_free (abs_path);
return ret; }

/* Create a single directory new_dir_name under parent_dir and commit.
 * Fails if an entry with that name already exists or the name is ignored.
 * Returns 0 on success, -1 on error with *error set. */
int seaf_repo_manager_post_dir (SeafRepoManager *mgr, const char *repo_id, const char *parent_dir, const char *new_dir_name, const char *user, GError **error) { SeafRepo *repo = NULL; SeafCommit *head_commit = NULL; char *canon_path = NULL; char buf[SEAF_PATH_MAX]; char *root_id = NULL; SeafDirent *new_dent = NULL; int ret = 0;
GET_REPO_OR_FAIL(repo, repo_id);
GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);
canon_path = get_canonical_path (parent_dir);
if (should_ignore_file (new_dir_name, NULL)) { seaf_warning ("[post dir] Invalid dir name %s.\n", new_dir_name); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid dir name"); ret = -1; goto out; }
FAIL_IF_FILE_EXISTS(repo->store_id, repo->version, head_commit->root_id, canon_path, new_dir_name, NULL);
/* empty dir: EMPTY_SHA1 root, no modifier, size -1 */
if (!new_dent) { new_dent = seaf_dirent_new (dir_version_from_repo_version(repo->version), EMPTY_SHA1, S_IFDIR, new_dir_name, (gint64)time(NULL), NULL, -1); }
root_id = do_post_file (repo, head_commit->root_id, canon_path, new_dent);
if (!root_id) { seaf_warning ("[put dir] Failed to put dir %s to %s in repo %s.\n", new_dir_name, canon_path, repo->id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to put dir"); ret = -1; goto out; }
/* Commit. */
snprintf(buf, SEAF_PATH_MAX, "Added directory \"%s\"", new_dir_name);
if (gen_new_commit (repo_id, head_commit, root_id, user, buf, NULL, TRUE, FALSE, NULL, error) < 0) { ret = -1; goto out; }
seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL);
out:
if (repo) seaf_repo_unref (repo);
if (head_commit) seaf_commit_unref(head_commit);
seaf_dirent_free (new_dent);
g_free (root_id);
g_free (canon_path);
return ret; }

/* Create an empty file new_file_name under parent_dir and commit.
 * Returns 0 on success, -1 on error with *error set. */
int seaf_repo_manager_post_empty_file (SeafRepoManager *mgr, const char *repo_id, const char *parent_dir, const char *new_file_name, const char *user, GError **error) { SeafRepo *repo = NULL; SeafCommit *head_commit = NULL; char *canon_path = NULL; char buf[SEAF_PATH_MAX]; char *root_id = NULL; SeafDirent *new_dent = NULL; int ret = 0;
GET_REPO_OR_FAIL(repo, repo_id);
GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);
if (!canon_path)
/* no need to call get_canonical_path again when retry */
canon_path = get_canonical_path (parent_dir);
if (should_ignore_file (new_file_name, NULL)) { seaf_warning ("[post file] Invalid file name %s.\n", new_file_name); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid file name"); ret = -1; goto out; }
FAIL_IF_FILE_EXISTS(repo->store_id, repo->version, head_commit->root_id, canon_path, new_file_name, NULL);
/* EMPTY_SHA1 content id, size 0 */
if (!new_dent) { new_dent = seaf_dirent_new (dir_version_from_repo_version(repo->version), EMPTY_SHA1, STD_FILE_MODE, new_file_name, (gint64)time(NULL), user, 0); }
root_id = do_post_file (repo, head_commit->root_id, canon_path, new_dent);
if (!root_id) { seaf_warning ("[put dir] Failed to create empty file dir.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to put dir"); ret = -1; goto out; }
/* Commit.
*/
snprintf(buf, SEAF_PATH_MAX, "Added \"%s\"", new_file_name);
if (gen_new_commit (repo_id, head_commit, root_id, user, buf, NULL, TRUE, FALSE, NULL, error) < 0) { ret = -1; goto out; }
seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL);
update_repo_size (repo_id);
out:
if (repo) seaf_repo_unref (repo);
if (head_commit) seaf_commit_unref(head_commit);
seaf_dirent_free (new_dent);
g_free (root_id);
g_free (canon_path);
return ret; }

/* Recursively descend dir_id along to_path and rename oldname to newname in
 * the target directory, writing new dir objects bottom-up.  Returns the new
 * dir object id for this level (caller frees), or NULL when the path/entry
 * was not found or saving failed. */
static char * rename_file_recursive(SeafRepo *repo, const char *dir_id, const char *to_path, const char *oldname, const char *newname) { SeafDir *olddir, *newdir; SeafDirent *dent; GList *ptr; char *to_path_dup = NULL; char *remain = NULL; char *slash; char *id = NULL; char *ret = NULL;
olddir = seaf_fs_manager_get_seafdir_sorted(seaf->fs_mgr, repo->store_id, repo->version, dir_id);
if (!olddir) return NULL;
/* we reach the target dir. */
if (*to_path == '\0') { SeafDirent *old, *newdent = NULL; GList *newentries = NULL, *p;
/* When renaming, there is a pitfall: we can't simply rename the
 * dirent, since the dirents are required to be sorted in descending
 * order. We need to copy all old dirents except the target dirent,
 * and then rename the target dirent, and then insert the new
 * dirent, so that we can maintain the descending order of dirents. */
for (p = olddir->entries; p != NULL; p = p->next) { old = p->data;
if (strcmp(old->name, oldname) != 0) { newentries = g_list_prepend (newentries, seaf_dirent_dup(old)); } else { newdent = seaf_dirent_new (old->version, old->id, old->mode, newname, old->mtime, old->modifier, old->size); } }
newentries = g_list_reverse (newentries);
if (newdent) { newentries = g_list_insert_sorted(newentries, newdent, compare_dirents); }
newdir = seaf_dir_new (NULL, newentries, dir_version_from_repo_version(repo->version));
if (seaf_dir_save (seaf->fs_mgr, repo->store_id, repo->version, newdir) == 0) ret = g_strndup (newdir->dir_id, 40);
seaf_dir_free (newdir);
goto out; }
/* split off the first component of to_path and recurse into it */
to_path_dup = g_strdup (to_path);
slash = strchr (to_path_dup, '/');
if (!slash) { remain = to_path_dup + strlen(to_path_dup); } else { *slash = '\0'; remain = slash + 1; }
for (ptr = olddir->entries; ptr; ptr = ptr->next) { dent = (SeafDirent *)ptr->data;
if (strcmp(dent->name, to_path_dup) != 0) continue;
id = rename_file_recursive (repo, dent->id, remain, oldname, newname);
if (id != NULL) { memcpy(dent->id, id, 40); dent->id[40] = '\0'; }
break; }
if (id != NULL) {
/* Create a new SeafDir.
*/ GList *new_entries; new_entries = dup_seafdir_entries (olddir->entries); newdir = seaf_dir_new (NULL, new_entries, dir_version_from_repo_version(repo->version)); if (seaf_dir_save (seaf->fs_mgr, repo->store_id, repo->version, newdir) == 0) ret = g_strdup(newdir->dir_id); seaf_dir_free (newdir); } out: g_free (to_path_dup); g_free (id); seaf_dir_free(olddir); return ret; } static char * do_rename_file(SeafRepo *repo, const char *root_id, const char *parent_dir, const char *oldname, const char *newname) { /* if parent_dir is a absolutely path, we will remove the first '/' */ if (*parent_dir == '/') parent_dir = parent_dir + 1; return rename_file_recursive(repo, root_id, parent_dir, oldname, newname); } int seaf_repo_manager_rename_file (SeafRepoManager *mgr, const char *repo_id, const char *parent_dir, const char *oldname, const char *newname, const char *user, GError **error) { SeafRepo *repo = NULL; SeafCommit *head_commit = NULL; char *root_id = NULL; char *canon_path = NULL; char buf[SEAF_PATH_MAX]; int mode = 0; int ret = 0; if (strcmp(oldname, newname) == 0) return 0; GET_REPO_OR_FAIL(repo, repo_id); GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id); if (!canon_path) canon_path = get_canonical_path (parent_dir); if (should_ignore_file (newname, NULL)) { seaf_warning ("[rename file] Invalid filename %s.\n", newname); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid filename"); ret = -1; goto out; } FAIL_IF_FILE_NOT_EXISTS(repo->store_id, repo->version, head_commit->root_id, canon_path, oldname, &mode); FAIL_IF_FILE_EXISTS(repo->store_id, repo->version, head_commit->root_id, canon_path, newname, NULL); root_id = do_rename_file (repo, head_commit->root_id, canon_path, oldname, newname); if (!root_id) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "faile to rename file %s", oldname); ret = -1; goto out; } /* Commit. 
*/ if (S_ISDIR(mode)) { snprintf(buf, SEAF_PATH_MAX, "Renamed directory \"%s\"", oldname); } else { snprintf(buf, SEAF_PATH_MAX, "Renamed \"%s\"", oldname); } if (gen_new_commit (repo_id, head_commit, root_id, user, buf, NULL, TRUE, FALSE, NULL, error) < 0) { ret = -1; goto out; } seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL); out: if (repo) seaf_repo_unref (repo); if (head_commit) seaf_commit_unref (head_commit); g_free (canon_path); g_free (root_id); return ret; } static char * put_file_recursive(SeafRepo *repo, const char *dir_id, const char *to_path, SeafDirent *newdent) { SeafDir *olddir, *newdir; SeafDirent *dent; GList *ptr; char *to_path_dup = NULL; char *remain = NULL; char *slash; char *id = NULL; char *ret = NULL; olddir = seaf_fs_manager_get_seafdir_sorted(seaf->fs_mgr, repo->store_id, repo->version, dir_id); if (!olddir) return NULL; /* we reach the target dir. Update the target dirent. */ if (*to_path == '\0') { GList *newentries = NULL, *p; SeafDirent *dent; for (p = olddir->entries; p; p = p->next) { dent = p->data; if (strcmp(dent->name, newdent->name) == 0) { newentries = g_list_prepend (newentries, seaf_dirent_dup(newdent)); } else { newentries = g_list_prepend (newentries, seaf_dirent_dup(dent)); } } newentries = g_list_reverse (newentries); newdir = seaf_dir_new (NULL, newentries, dir_version_from_repo_version(repo->version)); if (seaf_dir_save (seaf->fs_mgr, repo->store_id, repo->version, newdir) == 0) ret = g_strdup (newdir->dir_id); seaf_dir_free (newdir); goto out; } to_path_dup = g_strdup (to_path); slash = strchr (to_path_dup, '/'); if (!slash) { remain = to_path_dup + strlen(to_path_dup); } else { *slash = '\0'; remain = slash + 1; } for (ptr = olddir->entries; ptr; ptr = ptr->next) { dent = (SeafDirent *)ptr->data; if (strcmp(dent->name, to_path_dup) != 0) continue; id = put_file_recursive (repo, dent->id, remain, newdent); if (id != NULL) { memcpy(dent->id, id, 40); dent->id[40] = '\0'; if (repo->version > 0) dent->mtime = 
(guint64)time(NULL); }
break; }
if (id != NULL) {
/* Create a new SeafDir. */
GList *new_entries;
new_entries = dup_seafdir_entries (olddir->entries);
newdir = seaf_dir_new (NULL, new_entries, dir_version_from_repo_version(repo->version));
if (seaf_dir_save (seaf->fs_mgr, repo->store_id, repo->version, newdir) == 0) ret = g_strdup(newdir->dir_id);
seaf_dir_free (newdir); }
out:
g_free (to_path_dup);
g_free (id);
seaf_dir_free(olddir);
return ret; }

/* Thin wrapper over put_file_recursive: strip one leading '/' so the
 * recursion sees a relative path. */
static char * do_put_file (SeafRepo *repo, const char *root_id, const char *parent_dir, SeafDirent *dent) {
/* if parent_dir is a absolutely path, we will remove the first '/' */
if (*parent_dir == '/') parent_dir = parent_dir + 1;
return put_file_recursive(repo, root_id, parent_dir, dent); }

/* Replace the content of parent_dir/file_name with the file at
 * temp_file_path: index its blocks (encrypting when the repo is encrypted),
 * then swap the dirent and commit.  head_id overrides the repo head as the
 * base commit when non-NULL; mtime <= 0 means "now".  On success the new
 * file id is duplicated into *new_file_id when requested.  Returns 0 on
 * success, -1 on error with *error set. */
int seaf_repo_manager_put_file (SeafRepoManager *mgr, const char *repo_id, const char *temp_file_path, const char *parent_dir, const char *file_name, const char *user, const char *head_id, gint64 mtime, char **new_file_id, GError **error) { SeafRepo *repo = NULL; SeafCommit *head_commit = NULL; char *canon_path = NULL; unsigned char sha1[20]; char buf[SEAF_PATH_MAX]; char *root_id = NULL; SeafileCrypt *crypt = NULL; SeafDirent *new_dent = NULL; char hex[41]; char *old_file_id = NULL, *fullpath = NULL; char *gc_id = NULL; int ret = 0;
if (g_access (temp_file_path, R_OK) != 0) { seaf_warning ("[put file] File %s doesn't exist or not readable.\n", temp_file_path); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid input file"); return -1; }
GET_REPO_OR_FAIL(repo, repo_id);
const char *base = head_id ? head_id : repo->head->commit_id;
GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, base);
if (!canon_path) canon_path = get_canonical_path (parent_dir);
if (should_ignore_file (file_name, NULL)) { seaf_warning ("[put file] Invalid filename %s.\n", file_name); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid filename"); ret = -1; goto out; }
/* NOTE(review): "cantains" typo in the log message below, kept as-is */
if (strstr (parent_dir, "//") != NULL) { seaf_warning ("[put file] parent_dir cantains // sequence.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid parent dir"); ret = -1; goto out; }
FAIL_IF_FILE_NOT_EXISTS(repo->store_id, repo->version, head_commit->root_id, canon_path, file_name, NULL);
/* Write blocks. */
if (repo->encrypted) { unsigned char key[32], iv[16];
if (seaf_passwd_manager_get_decrypt_key_raw (seaf->passwd_mgr, repo_id, user, key, iv) < 0) { seaf_warning ("Passwd for repo %s is not set.\n", repo_id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Passwd is not set"); ret = -1; goto out; }
crypt = seafile_crypt_new (repo->enc_version, key, iv); }
/* remember the GC id so gen_new_commit can detect a concurrent GC run */
gc_id = seaf_repo_get_current_gc_id (repo);
gint64 size;
if (seaf_fs_manager_index_blocks (seaf->fs_mgr, repo->store_id, repo->version, temp_file_path, sha1, &size, crypt, TRUE, FALSE, NULL) < 0) { seaf_warning ("failed to index blocks"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to index blocks"); ret = -1; goto out; }
rawdata_to_hex(sha1, hex, 20);
if (mtime <= 0) { mtime = (gint64)time(NULL); }
new_dent = seaf_dirent_new (dir_version_from_repo_version(repo->version), hex, STD_FILE_MODE, file_name, mtime, user, size);
if (!fullpath) fullpath = g_build_filename(parent_dir, file_name, NULL);
old_file_id = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr, repo->store_id, repo->version, head_commit->root_id, fullpath, NULL, NULL);
/* content unchanged: skip the commit entirely */
if (g_strcmp0(old_file_id, new_dent->id) == 0) { if (new_file_id) *new_file_id = g_strdup(new_dent->id); goto out; }
root_id = do_put_file (repo, head_commit->root_id, canon_path,
new_dent);
if (!root_id) { seaf_warning ("[put file] Failed to put file %s to %s in repo %s.\n", file_name, canon_path, repo->id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to put file"); ret = -1; goto out; }
/* Commit. */
snprintf(buf, SEAF_PATH_MAX, "Modified \"%s\"", file_name);
if (gen_new_commit (repo_id, head_commit, root_id, user, buf, NULL, TRUE, TRUE, gc_id, error) < 0) { ret = -1; goto out; }
if (new_file_id) *new_file_id = g_strdup(new_dent->id);
seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL);
out:
if (repo) seaf_repo_unref (repo);
if (head_commit) seaf_commit_unref(head_commit);
seaf_dirent_free (new_dent);
g_free (root_id);
g_free (canon_path);
g_free (crypt);
g_free (old_file_id);
g_free (fullpath);
g_free (gc_id);
if (ret == 0) { update_repo_size (repo_id); }
return ret; }

/* Diff parent_root against root and render the result as a human-readable
 * commit description (caller frees).  May return NULL when the diff yields
 * no description. */
static char * gen_commit_description (SeafRepo *repo, const char *root, const char *parent_root) { GList *p; GList *results = NULL; char *desc;
diff_commit_roots (repo->store_id, repo->version, parent_root, root, &results, TRUE);
desc = diff_results_to_description (results);
for (p = results; p; p = p->next) { DiffEntry *de = p->data; diff_entry_free (de); }
g_list_free (results);
return desc;
}

/* Point dir_path at the already-saved dir object new_dir_id and commit,
 * auto-generating the commit message from the diff.  head_id overrides the
 * repo head as the base commit when non-NULL; the new commit id is copied
 * into new_commit_id when requested.  Returns 0 on success, -1 on error. */
int seaf_repo_manager_update_dir (SeafRepoManager *mgr, const char *repo_id, const char *dir_path, const char *new_dir_id, const char *user, const char *head_id, char *new_commit_id, GError **error) { SeafRepo *repo = NULL; SeafCommit *head_commit = NULL; char *canon_path = NULL; char *parent = NULL, *dirname = NULL; SeafDirent *new_dent = NULL; char *root_id = NULL; char *commit_desc = NULL; int ret = 0;
GET_REPO_OR_FAIL(repo, repo_id);
const char *base = head_id ? head_id : repo->head->commit_id;
GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, base);
/* Are we updating the root? */
if (strcmp (dir_path, "/") == 0) { commit_desc = gen_commit_description (repo, new_dir_id, head_commit->root_id);
if (!commit_desc) commit_desc = g_strdup("Auto merge by system");
if (gen_new_commit (repo_id, head_commit, new_dir_id, user, commit_desc, new_commit_id, TRUE, FALSE, NULL, error) < 0) ret = -1;
g_free (commit_desc);
goto out; }
parent = g_path_get_dirname (dir_path);
canon_path = get_canonical_path (parent);
g_free (parent);
dirname = g_path_get_basename (dir_path);
FAIL_IF_FILE_NOT_EXISTS(repo->store_id, repo->version, head_commit->root_id, canon_path, dirname, NULL);
new_dent = seaf_dirent_new (dir_version_from_repo_version(repo->version), new_dir_id, S_IFDIR, dirname, (gint64)time(NULL), NULL, -1);
root_id = do_put_file (repo, head_commit->root_id, canon_path, new_dent);
if (!root_id) { seaf_warning ("[update dir] Failed to put file.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to update dir"); ret = -1; goto out; }
commit_desc = gen_commit_description (repo, root_id, head_commit->root_id);
if (!commit_desc) commit_desc = g_strdup("Auto merge by system");
if (gen_new_commit (repo_id, head_commit, root_id, user, commit_desc, new_commit_id, TRUE, FALSE, NULL, error) < 0) { ret = -1; g_free (commit_desc); goto out; }
g_free (commit_desc);
out:
seaf_repo_unref (repo);
seaf_commit_unref (head_commit);
seaf_dirent_free (new_dent);
g_free (canon_path);
g_free (dirname);
g_free (root_id);
if (ret == 0) update_repo_size (repo_id);
return ret; }

/* NOTE: seaf_repo_manager_put_file_blocks below is dead code kept commented
 * out; it continues through the following comment lines. */
/* int */
/* seaf_repo_manager_put_file_blocks (SeafRepoManager *mgr, */
/* const char *repo_id, */
/* const char *parent_dir, */
/* const char *file_name, */
/* const char *blockids_json, */
/* const char *paths_json, */
/* const char *user, */
/* const char *head_id, */
/* gint64 file_size, */
/* char **new_file_id, */
/* GError **error) */
/* { */
/* SeafRepo *repo = NULL; */
/* SeafCommit *head_commit = NULL; */
/* char *canon_path = NULL; */
/* unsigned char sha1[20]; */
/* char
buf[SEAF_PATH_MAX]; */
/* char *root_id = NULL; */
/* SeafDirent *new_dent = NULL; */
/* char hex[41]; */
/* GList *blockids = NULL, *paths = NULL, *ptr; */
/* char *old_file_id = NULL, *fullpath = NULL; */
/* int ret = 0; */
/* blockids = json_to_file_list (blockids_json); */
/* paths = json_to_file_list (paths_json); */
/* if (g_list_length(blockids) != g_list_length(paths)) { */
/* seaf_warning ("[put-blks] Invalid blockids or paths.\n"); */
/* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid files"); */
/* ret = -1; */
/* goto out; */
/* } */
/* for (ptr = paths; ptr; ptr = ptr->next) { */
/* char *temp_file_path = ptr->data; */
/* if (g_access (temp_file_path, R_OK) != 0) { */
/* seaf_warning ("[put-blks] File block %s doesn't exist or not readable.\n", */
/* temp_file_path); */
/* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */
/* "Invalid input file"); */
/* ret = -1; */
/* goto out; */
/* } */
/* } */
/* GET_REPO_OR_FAIL(repo, repo_id); */
/* const char *base = head_id ? head_id : repo->head->commit_id; */
/* GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, base); */
/* if (!canon_path) */
/* canon_path = get_canonical_path (parent_dir); */
/* if (should_ignore_file (file_name, NULL)) { */
/* seaf_warning ("[put-blks] Invalid filename %s.\n", file_name); */
/* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */
/* "Invalid filename"); */
/* ret = -1; */
/* goto out; */
/* } */
/* if (strstr (parent_dir, "//") != NULL) { */
/* seaf_warning ("[put-blks] parent_dir cantains // sequence.\n"); */
/* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */
/* "Invalid parent dir"); */
/* ret = -1; */
/* goto out; */
/* } */
/* FAIL_IF_FILE_NOT_EXISTS(repo->store_id, repo->version, */
/* head_commit->root_id, canon_path, file_name, NULL); */
/* /\* Write blocks. *\/ */
/* if (seaf_fs_manager_index_file_blocks (seaf->fs_mgr, */
/* repo->store_id, repo->version, */
/* paths, */
/* blockids, sha1, file_size) < 0) { */
/* seaf_warning ("failed to index blocks"); */
/* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, */
/* "Failed to index blocks"); */
/* ret = -1; */
/* goto out; */
/* } */
/* rawdata_to_hex(sha1, hex, 20); */
/* new_dent = seaf_dirent_new (dir_version_from_repo_version(repo->version), */
/* hex, STD_FILE_MODE, file_name, */
/* (gint64)time(NULL), user, file_size); */
/* if (!fullpath) */
/* fullpath = g_build_filename(parent_dir, file_name, NULL); */
/* old_file_id = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr, */
/* repo->store_id, repo->version, */
/* head_commit->root_id, */
/* fullpath, NULL, NULL); */
/* if (g_strcmp0(old_file_id, new_dent->id) == 0) { */
/* if (new_file_id) */
/* *new_file_id = g_strdup(new_dent->id); */
/* goto out; */
/* } */
/* root_id = do_put_file (repo, head_commit->root_id, canon_path, new_dent); */
/* if (!root_id) { */
/* seaf_warning ("[put-blks] Failed to put file.\n"); */
/* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, */
/* "Failed to put file"); */
/* ret = -1; */
/* goto out; */
/* } */
/* /\* Commit. *\/ */
/* snprintf(buf, SEAF_PATH_MAX, "Modified \"%s\"", file_name); */
/* if (gen_new_commit (repo_id, head_commit, root_id, user, buf, NULL, error) < 0) { */
/* ret = -1; */
/* goto out; */
/* } */
/* if (new_file_id) */
/* *new_file_id = g_strdup(new_dent->id); */
/* out: */
/* if (repo) */
/* seaf_repo_unref (repo); */
/* if (head_commit) */
/* seaf_commit_unref(head_commit); */
/* string_list_free (blockids); */
/* string_list_free (paths); */
/* seaf_dirent_free (new_dent); */
/* g_free (root_id); */
/* g_free (canon_path); */
/* g_free (old_file_id); */
/* g_free (fullpath); */
/* if (ret == 0) { */
/* update_repo_size (repo_id); */
/* } */
/* return ret; */
/* } */

/* split filename into base and extension */
/* *base and *ext are newly allocated (caller frees); *ext is NULL when
 * there is no dot, otherwise it includes the leading '.'.
 * NOTE(review): temporarily writes '\0' through a pointer derived from the
 * const `filename` argument — works only for writable buffers and is not
 * thread-safe; confirm callers never pass string literals. */
static void filename_splitext (const char *filename, char **base, char **ext) { char *dot = strrchr(filename, '.');
if (!dot) { *base = g_strdup(filename); *ext = NULL; } else { *dot = '\0'; *base = g_strdup(filename); *dot = '.'; *ext = g_strdup(dot); } }

/* Restore old_dent into the repo root, renaming it "name (N).ext" as long
 * as a conflicting entry exists.  Sets *skipped when the identical file is
 * already present.  Returns the new root id (caller frees) or NULL. */
static char * revert_file_to_root (SeafRepo *repo, const char *root_id, SeafDirent *old_dent, gboolean *skipped, GError **error) { SeafDir *dir = NULL; SeafDirent *dent = NULL, *newdent = NULL; char *basename = NULL, *ext = NULL; char new_file_name[SEAF_PATH_MAX]; char *new_root_id = NULL; int i = 1; GList *p;
*skipped = FALSE;
dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr, repo->store_id, repo->version, root_id, "/", error);
if (*error) { return NULL; }
snprintf (new_file_name, sizeof(new_file_name), "%s", old_dent->name);
filename_splitext(old_dent->name, &basename, &ext);
for (;;) { for (p = dir->entries; p; p = p->next) { dent = p->data;
if (strcmp(dent->name, new_file_name) != 0) continue;
if (S_ISREG(dent->mode)) {
/* same named file */
if (strcmp(dent->id, old_dent->id) == 0) { *skipped = TRUE; goto out; } else {
/* rename and retry */
snprintf (new_file_name, sizeof(new_file_name), "%s (%d)%s", basename, i++, ext);
break; } } else if (S_ISDIR(dent->mode)) {
/* rename and retry */
snprintf (new_file_name, sizeof(new_file_name), "%s (%d)%s", basename, i++, ext);
break; } }
/* scanned the whole dir without a clash: the name is free */
if (p == NULL) break; }
newdent = seaf_dirent_new (old_dent->version, old_dent->id, STD_FILE_MODE, new_file_name, old_dent->mtime, old_dent->modifier, old_dent->size);
new_root_id = do_post_file (repo, root_id, "/", newdent);
out:
if (dir) seaf_dir_free (dir);
g_free (basename);
g_free (ext);
seaf_dirent_free (newdent);
return new_root_id; }

/* Restore old_dent into parent_dir.  A same-named file with different
 * content is overwritten in place; a same-named directory forces a
 * "name (N).ext" rename-and-retry; an identical file sets *skipped.
 * Returns the new root id (caller frees) or NULL. */
static char * revert_file_to_parent_dir (SeafRepo *repo, const char *root_id, const char *parent_dir, SeafDirent *old_dent, gboolean *skipped, GError **error) { SeafDir *dir = NULL; SeafDirent *dent = NULL, *newdent = NULL; char *basename = NULL, *ext = NULL; char new_file_name[SEAF_PATH_MAX]; char *new_root_id = NULL; gboolean is_overwrite = FALSE; int i = 1; GList *p;
*skipped = FALSE;
dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr, repo->store_id, repo->version, root_id, parent_dir, error);
if (*error) { return NULL; }
snprintf (new_file_name, sizeof(new_file_name), "%s", old_dent->name);
filename_splitext(old_dent->name, &basename, &ext);
while(TRUE) { for (p = dir->entries; p; p = p->next) { dent = p->data;
if (strcmp(dent->name, new_file_name) != 0) continue;
if (S_ISREG(dent->mode)) {
/* same named file */
if (strcmp(dent->id, old_dent->id) == 0) { *skipped = TRUE; goto out; } else {
/* same name, different id: just overwrite */
is_overwrite = TRUE;
goto do_revert; } } else if (S_ISDIR(dent->mode)) {
/* rename and retry */
snprintf (new_file_name, sizeof(new_file_name), "%s (%d)%s", basename, i++, ext);
break; } }
if (p == NULL) break; }
do_revert:
newdent = seaf_dirent_new (old_dent->version, old_dent->id, STD_FILE_MODE, new_file_name, old_dent->mtime, old_dent->modifier, old_dent->size);
if (is_overwrite) { new_root_id = do_put_file (repo, root_id, parent_dir, newdent); } else { new_root_id = do_post_file (repo, root_id, parent_dir, newdent); }
out:
if (dir) seaf_dir_free (dir);
g_free (basename);
g_free (ext);
seaf_dirent_free (newdent);
return new_root_id; }

/* Return TRUE when `path` resolves to a directory under root_id.  A
 * PATH_NO_EXIST error is swallowed (plain FALSE); any other error is left
 * in *error and also yields FALSE. */
static gboolean detect_path_exist (SeafRepo *repo, const char *root_id, const char *path, GError **error) { SeafDir *dir;
dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr, repo->store_id, repo->version, root_id, path, error);
if (*error) { if (g_error_matches(*error, SEAFILE_DOMAIN, SEAF_ERR_PATH_NO_EXIST)) {
/* path does not exist */
g_clear_error(error);
return FALSE; } else {
/* Other error */
return FALSE; } }
seaf_dir_free(dir);
return TRUE; }

/* Restore file_path to its state in old_commit_id, recreating the parent
 * directory when it no longer exists and committing the result.  Returns 0
 * on success (including the no-op/skip cases), -1 on error with *error
 * set. */
int seaf_repo_manager_revert_file (SeafRepoManager *mgr, const char *repo_id, const char *old_commit_id, const char *file_path, const char *user, GError **error) { SeafRepo *repo = NULL; SeafCommit *head_commit = NULL, *old_commit = NULL; char *parent_dir = NULL, *filename = NULL; SeafDirent *old_dent = NULL; char *canon_path = NULL, *root_id = NULL; char buf[SEAF_PATH_MAX]; char time_str[512]; gboolean parent_dir_exist = FALSE; gboolean skipped = FALSE; int ret = 0;
GET_REPO_OR_FAIL(repo, repo_id);
GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);
/* If old_commit_id is head commit, do nothing.
*/ if (strcmp(repo->head->commit_id, old_commit_id) == 0) { g_debug ("[revert file] commit is head, do nothing\n"); goto out; } if (!old_commit) { GET_COMMIT_OR_FAIL(old_commit, repo->id, repo->version, old_commit_id); if (strcmp(old_commit->repo_id, repo_id) != 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_COMMIT, "bad commit id"); ret = -1; goto out; } } if (!canon_path) { canon_path = get_canonical_path (file_path); if (canon_path[strlen(canon_path) -1 ] == '/') { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_COMMIT, "bad target file path"); ret = -1; goto out; } parent_dir = g_path_get_dirname(canon_path); filename = g_path_get_basename(canon_path); old_dent = get_dirent_by_path (repo, old_commit->root_id, parent_dir, filename, error); if (!old_dent || S_ISDIR(old_dent->mode)) { ret = -1; goto out; } if (*error) { seaf_warning ("[revert file] error: %s\n", (*error)->message); g_clear_error (error); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "internal error"); ret = -1; goto out; } } parent_dir_exist = detect_path_exist (repo, head_commit->root_id, parent_dir, error); if (*error) { seaf_warning ("[revert file] error: %s\n", (*error)->message); g_clear_error (error); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "internal error"); ret = -1; goto out; } if (!parent_dir_exist) { /* When parent dir does not exist, create the parent dir first. */ const char *relative_path = parent_dir; if (parent_dir[0] == '/') { relative_path = parent_dir + 1; } seaf_repo_manager_mkdir_with_parents (mgr, repo_id, "/", relative_path, user, error); if (*error) { seaf_warning ("[revert file] failed to create parent dir: %s\n", (*error)->message); g_clear_error (error); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "internal error"); ret = -1; goto out; } // Get head commit again, after mkdir with parents. 
seaf_repo_unref (repo); seaf_commit_unref (head_commit); GET_REPO_OR_FAIL(repo, repo_id); GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id); root_id = revert_file_to_parent_dir (repo, head_commit->root_id, parent_dir, old_dent, &skipped, error); } else { root_id = revert_file_to_parent_dir (repo, head_commit->root_id, parent_dir, old_dent, &skipped, error); } if (*error) { seaf_warning ("[revert file] error: %s\n", (*error)->message); g_clear_error (error); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "internal error"); ret = -1; goto out; } if (skipped) { goto out; } if (!root_id) { seaf_warning ("[revert file] Failed to revert file.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to revert file"); ret = -1; goto out; } /* Commit. */ #ifndef WIN32 strftime (time_str, sizeof(time_str), "%F %T", localtime((time_t *)(&old_commit->ctime))); #else strftime (time_str, sizeof(time_str), "%Y-%m-%d %H:%M:%S", localtime((time_t *)(&old_commit->ctime))); #endif snprintf(buf, SEAF_PATH_MAX, "Reverted file \"%s\" to status at %s", filename, time_str); if (gen_new_commit (repo_id, head_commit, root_id, user, buf, NULL, TRUE, FALSE, NULL, error) < 0) { ret = -1; goto out; } seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL); out: if (repo) seaf_repo_unref (repo); if (head_commit) seaf_commit_unref (head_commit); if (old_commit) seaf_commit_unref (old_commit); g_free (root_id); g_free (parent_dir); g_free (filename); g_free (canon_path); seaf_dirent_free (old_dent); if (ret == 0) { update_repo_size (repo_id); } return ret; } static char * revert_dir (SeafRepo *repo, const char *root_id, const char *parent_dir, SeafDirent *old_dent, gboolean *skipped, GError **error) { SeafDir *dir = NULL; SeafDirent *dent = NULL, *newdent = NULL; char new_dir_name[SEAF_PATH_MAX]; char *new_root_id = NULL; int i = 1; GList *p; *skipped = FALSE; dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr, repo->store_id, repo->version, 
root_id, parent_dir, error); if (*error) { return NULL; } snprintf (new_dir_name, sizeof(new_dir_name), "%s", old_dent->name); for (;;) { for (p = dir->entries; p; p = p->next) { dent = p->data; if (strcmp(dent->name, new_dir_name) != 0) continue; /* the same dir */ if (S_ISDIR(dent->mode) && strcmp(dent->id, old_dent->id) == 0) { *skipped = TRUE; goto out; } else { /* rename and retry */ snprintf (new_dir_name, sizeof(new_dir_name), "%s (%d)", old_dent->name, i++); break; } } if (p == NULL) break; } newdent = seaf_dirent_new (old_dent->version, old_dent->id, S_IFDIR, new_dir_name, old_dent->mtime, NULL, -1); new_root_id = do_post_file (repo, root_id, parent_dir, newdent); out: if (dir) seaf_dir_free (dir); seaf_dirent_free (newdent); return new_root_id; } int seaf_repo_manager_revert_dir (SeafRepoManager *mgr, const char *repo_id, const char *old_commit_id, const char *dir_path, const char *user, GError **error) { SeafRepo *repo = NULL; SeafCommit *head_commit = NULL, *old_commit = NULL; char *parent_dir = NULL, *dirname = NULL; SeafDirent *old_dent = NULL; char *canon_path = NULL, *root_id = NULL; char buf[SEAF_PATH_MAX]; gboolean parent_dir_exist = FALSE; gboolean skipped = FALSE; int ret = 0; GET_REPO_OR_FAIL(repo, repo_id); GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id); /* If old_commit_id is head commit, do nothing. 
*/ if (strcmp(repo->head->commit_id, old_commit_id) == 0) { g_debug ("[revert dir] commit is head, do nothing\n"); goto out; } if (!old_commit) { GET_COMMIT_OR_FAIL(old_commit, repo->id, repo->version, old_commit_id); if (strcmp(old_commit->repo_id, repo_id) != 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_COMMIT, "bad commit id"); ret = -1; goto out; } } if (!canon_path) { canon_path = get_canonical_path (dir_path); parent_dir = g_path_get_dirname(canon_path); dirname = g_path_get_basename(canon_path); old_dent = get_dirent_by_path (repo, old_commit->root_id, parent_dir, dirname, error); if (!old_dent || S_ISREG(old_dent->mode)) { ret = -1; goto out; } if (*error) { seaf_warning ("[revert dir] error: %s\n", (*error)->message); g_clear_error (error); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "internal error"); ret = -1; goto out; } } parent_dir_exist = detect_path_exist (repo, head_commit->root_id, parent_dir, error); if (*error) { seaf_warning ("[revert dir] error: %s\n", (*error)->message); g_clear_error (error); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "internal error"); ret = -1; goto out; } if (!parent_dir_exist) { /* When parent dir does not exist, create the parent dir first. */ const char *relative_path = parent_dir; if (parent_dir[0] == '/') { relative_path = parent_dir + 1; } seaf_repo_manager_mkdir_with_parents (mgr, repo_id, "/", relative_path, user, error); if (*error) { seaf_warning ("[revert file] failed to create parent dir: %s\n", (*error)->message); g_clear_error (error); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "internal error"); ret = -1; goto out; } // Get head commit again, after mkdir with parents. 
seaf_repo_unref (repo); seaf_commit_unref (head_commit); GET_REPO_OR_FAIL(repo, repo_id); GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id); root_id = revert_dir (repo, head_commit->root_id, parent_dir, old_dent, &skipped, error); } else { root_id = revert_dir (repo, head_commit->root_id, parent_dir, old_dent, &skipped, error); } if (*error) { seaf_warning ("[revert dir] error: %s\n", (*error)->message); g_clear_error (error); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "internal error"); ret = -1; goto out; } if (skipped) { goto out; } if (!root_id) { seaf_warning ("[revert dir] Failed to revert dir.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to revert dir"); ret = -1; goto out; } /* Commit. */ snprintf(buf, SEAF_PATH_MAX, "Recovered deleted directory \"%s\"", dirname); if (gen_new_commit (repo_id, head_commit, root_id, user, buf, NULL, TRUE, FALSE, NULL, error) < 0) { ret = -1; goto out; } seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL); out: if (repo) seaf_repo_unref (repo); if (head_commit) seaf_commit_unref (head_commit); if (old_commit) seaf_commit_unref (old_commit); g_free (root_id); g_free (parent_dir); g_free (dirname); g_free (canon_path); seaf_dirent_free (old_dent); #define REVERT_TO_ROOT 0x1 if (ret == 0) { update_repo_size (repo_id); } return ret; } typedef struct CollectRevisionParam CollectRevisionParam; struct CollectRevisionParam { SeafRepo *repo; const char *path; GList *wanted_commits; GList *file_id_list; GList *file_size_list; int n_commits; GHashTable *file_info_cache; /* > 0: keep a period of history; * == 0: N/A * < 0: keep all history data. 
     */
    gint64 truncate_time;
    gboolean got_latest;
    gboolean got_second;
    gboolean not_found_file;
    GError **error;
};

/* Per-commit snapshot of the tracked file plus the dir ids on its path. */
typedef struct FileInfo {
    gint64 file_size;
    char *file_id;
    GList *dir_ids;
} FileInfo;

/* GDestroyNotify for FileInfo values stored in file_info_cache. */
static void
free_file_info (gpointer info)
{
    if (!info)
        return;

    FileInfo *file_info = info;
    g_free (file_info->file_id);
    g_list_free_full (file_info->dir_ids, g_free);
    g_free (file_info);
}

// compare current commit dir_id with pre commit
// if dir_id doesn't change, it means subdir doesn't change, append all
// sub_dir ids of prev to current
// that is it is no need to traverse all sub dir, if root doesn't change
static gboolean
compare_or_add_id (GList *dir_ids,
                   GList **cur_dir_ids,
                   const char *dir_id)
{
    gboolean ret = FALSE;
    GList *tmp = dir_ids;

    if (tmp == NULL || strcmp ((char *)tmp->data, dir_id) != 0) {
        *cur_dir_ids = g_list_append (*cur_dir_ids, g_strdup (dir_id));
    } else {
        // file doesn't changed, append all dir ids to this commit cache
        while (tmp) {
            *cur_dir_ids = g_list_append (*cur_dir_ids,
                                          g_strdup ((char *)tmp->data));
            tmp = tmp->next;
        }
        ret = TRUE;
    }

    return ret;
}

// dir_ids: all dir_ids in prev commit, in the order of fs tree
// cur_dir_ids: all dir_ids in current commit
// if no error and returned seafdir is NULL, then it means
// searched dir doesn't change in pre and current commit
static SeafDir*
get_seafdir_by_path (const char *repo_id,
                     int version,
                     const char *root_id,
                     const char *path,
                     GList *dir_ids,
                     GList **cur_dir_ids,
                     GError **error)
{
    SeafDir *dir = NULL;
    SeafDirent *dent;
    const char *dir_id = root_id;
    char *name, *saveptr;
    char *tmp_path = NULL;
    GList *tmp = dir_ids;

    dir = seaf_fs_manager_get_seafdir (seaf->fs_mgr, repo_id, version, dir_id);
    if (!dir) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_DIR_MISSING,
                     "directory is missing");
        goto out;
    }

    /* Root unchanged vs. previous commit: shortcut, return NULL w/o error. */
    if (compare_or_add_id (tmp, cur_dir_ids, dir_id)) {
        seaf_dir_free (dir);
        dir = NULL;
        goto out;
    } else if (tmp) {
        tmp = tmp->next;
    }

    if (strcmp (path, ".") == 0 || strcmp (path, "/") == 0) {
        goto out;
    }
    else {
        tmp_path = g_strdup (path);
    }

    /* Walk @path one component at a time, comparing each level's dir id
     * against the previous commit's list as we descend. */
    name = strtok_r (tmp_path, "/", &saveptr);
    while (name != NULL) {
        GList *l;
        for (l = dir->entries; l != NULL; l = l->next) {
            dent = l->data;
            if (strcmp(dent->name, name) == 0 && S_ISDIR(dent->mode)) {
                dir_id = dent->id;
                break;
            }
        }

        if (!l) {
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_PATH_NO_EXIST,
                         "Path does not exists %s", path);
            seaf_dir_free (dir);
            dir = NULL;
            break;
        }

        /* This level unchanged vs. previous commit: shortcut as above. */
        if (compare_or_add_id (tmp, cur_dir_ids, dir_id)) {
            seaf_dir_free (dir);
            dir = NULL;
            goto out;
        } else if (tmp) {
            tmp = tmp->next;
        }

        SeafDir *prev = dir;
        dir = seaf_fs_manager_get_seafdir (seaf->fs_mgr, repo_id, version,
                                           dir_id);
        seaf_dir_free (prev);

        if (!dir) {
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_DIR_MISSING,
                         "directory is missing");
            break;
        }

        name = strtok_r (NULL, "/", &saveptr);
    }

out:
    g_free (tmp_path);
    return dir;
}

/*
 * Return NULL if file is not found, error is still NULL;
 * If we have IO errors, error is set.
 */
static FileInfo*
get_file_info (SeafRepo *repo,
               SeafCommit *commit,
               const char *path,
               GHashTable *file_info_cache,
               FileInfo *last_info,
               GError **error)
{
    SeafDir *dir = NULL;
    SeafDirent *dirent = NULL;
    FileInfo *file_info = NULL;
    GList *tmp;

    /* Answer from cache if this commit was already inspected. */
    file_info = g_hash_table_lookup (file_info_cache, commit->commit_id);
    if (file_info)
        return file_info;

    char *dir_name = g_path_get_dirname (path);
    char *file_name = g_path_get_basename (path);
    GList *cur_dir_ids = NULL;
    GList *dir_ids = last_info ? last_info->dir_ids : NULL;

    dir = get_seafdir_by_path (repo->store_id, repo->version,
                               commit->root_id, dir_name, dir_ids,
                               &cur_dir_ids, error);
    if (*error) {
        if ((*error)->code == SEAF_ERR_PATH_NO_EXIST)
            g_clear_error (error);
        goto out;
    }

    if (!dir) {
        // if no error and return is null from get_seafdir_by_path, it means
        // dir doesn't change in pre and current commit, so the last_info
        // (file info of pre commit) is also the current file info
        file_info = g_new0 (FileInfo, 1);
        file_info->file_id = g_strdup (last_info->file_id);
        file_info->dir_ids = cur_dir_ids;
        file_info->file_size = last_info->file_size;
        g_hash_table_insert (file_info_cache, g_strdup (commit->commit_id),
                             file_info);
    } else {
        for (tmp = dir->entries; tmp; tmp = tmp->next) {
            dirent = tmp->data;
            if (strcmp (file_name, dirent->name) == 0 &&
                S_ISREG (dirent->mode)) {
                break;
            }
        }
        if (tmp) {
            // from parent dir find the file, cache file info for the next
            // compare
            file_info = g_new0 (FileInfo, 1);
            file_info->file_id = g_strdup (dirent->id);
            file_info->dir_ids = cur_dir_ids;
            /* v0 repos don't store the size in the dirent; compute it. */
            if (repo->version > 0) {
                file_info->file_size = dirent->size;
            } else {
                file_info->file_size =
                    seaf_fs_manager_get_file_size (seaf->fs_mgr,
                                                   repo->store_id,
                                                   repo->version,
                                                   dirent->id);
            }
            g_hash_table_insert (file_info_cache,
                                 g_strdup (commit->commit_id),
                                 file_info);
        }
    }

out:
    if (dir)
        seaf_dir_free (dir);
    /* cur_dir_ids was handed to file_info on success; free it otherwise. */
    if (!file_info) {
        g_list_free_full (cur_dir_ids, g_free);
    }
    g_free (file_name);
    g_free (dir_name);

    return file_info;
}

/* Record @commit as a revision of the tracked file: take a ref on the
 * commit and prepend commit/id/size onto the three parallel lists. */
static void
add_revision_info (CollectRevisionParam *data,
                   SeafCommit *commit, const char *file_id, gint64 file_size)
{
    seaf_commit_ref (commit);
    data->wanted_commits = g_list_prepend (data->wanted_commits, commit);
    data->file_id_list = g_list_prepend (data->file_id_list,
                                         g_strdup(file_id));
    gint64 *size = g_malloc(sizeof(gint64));
    *size = file_size;
    data->file_size_list = g_list_prepend (data->file_size_list, size);
    ++(data->n_commits);
}

/* CommitTraverseFunc: decide whether @commit modified the tracked file. */
static gboolean
collect_file_revisions (SeafCommit *commit, void *vdata, gboolean *stop)
{
    CollectRevisionParam *data = vdata;
    SeafRepo *repo = data->repo;
    const char *path = data->path;
    GError **error = data->error;

    GHashTable *file_info_cache = data->file_info_cache;
    FileInfo *file_info = NULL;
    FileInfo *parent1_info = NULL;
    FileInfo *parent2_info = NULL;

    SeafCommit *parent_commit = NULL;
    SeafCommit *parent_commit2 = NULL;

    gboolean ret = TRUE;

    /* At least find the latest revision. */
    if (data->got_latest && data->truncate_time == 0) {
        *stop = TRUE;
        return TRUE;
    }

    /* Past the history-retention window: stop once we also have a second
     * revision. */
    if (data->got_latest &&
        data->truncate_time > 0 &&
        (gint64)(commit->ctime) < data->truncate_time &&
        data->got_second)
    {
        *stop = TRUE;
        data->not_found_file = TRUE;
        return TRUE;
    }

    g_clear_error (error);
    file_info = get_file_info (data->repo, commit, path,
                               file_info_cache, NULL, error);
    if (*error) {
        seaf_warning ("Error when finding %s under %s:%s\n",
                      path, data->repo->id, commit->commit_id);
        ret = FALSE;
        goto out;
    }

    if (!file_info) {
        /* Target file is not present in this commit.
         * Stop traversing after finding the initial version.
         * Deleted files with the same path are not included in history.
         */
        *stop = TRUE;
        data->not_found_file = TRUE;
        goto out;
    }

    if (!commit->parent_id) {
        /* Initial commit */
        add_revision_info (data, commit, file_info->file_id,
                           file_info->file_size);
        goto out;
    }

    parent_commit = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                                    repo->id, repo->version,
                                                    commit->parent_id);
    if (!parent_commit) {
        seaf_warning ("Failed to get commit %s:%s\n", repo->id,
                      commit->parent_id);
        ret = FALSE;
        goto out;
    }

    parent1_info = get_file_info (data->repo, parent_commit, path,
                                  file_info_cache, file_info, error);
    if (*error) {
        seaf_warning ("Error when finding %s under %s:%s\n",
                      path, data->repo->id, parent_commit->commit_id);
        ret = FALSE;
        goto out;
    }

    if (parent1_info &&
        g_strcmp0 (parent1_info->file_id, file_info->file_id) == 0) {
        /* This commit does not modify the target file */
        goto out;
    }

    /* In case of a merge, the second parent also need compare */
    if (commit->second_parent_id) {
        parent_commit2 = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                                         repo->id,
                                                         repo->version,
                                                         commit->second_parent_id);
        if (!parent_commit2) {
            seaf_warning ("Failed to get commit %s:%s\n",
                          repo->id, commit->second_parent_id);
            ret = FALSE;
            goto out;
        }

        parent2_info = get_file_info (data->repo, parent_commit2, path,
                                      file_info_cache, file_info, error);
        if (*error) {
            seaf_warning ("Error when finding %s under %s:%s\n",
                          path, data->repo->id, parent_commit2->commit_id);
            ret = FALSE;
            goto out;
        }

        if (parent2_info &&
            g_strcmp0 (parent2_info->file_id, file_info->file_id) == 0) {
            /* This commit does not modify the target file */
            goto out;
        }
    }

    /* The file differs from both parents: this commit is a revision. */
    if (!data->got_latest) {
        data->got_latest = TRUE;
    } else {
        if (!data->got_second)
            data->got_second = TRUE;
    }
    add_revision_info (data, commit, file_info->file_id,
                       file_info->file_size);

out:
    if (parent_commit)
        seaf_commit_unref (parent_commit);
    if (parent_commit2)
        seaf_commit_unref (parent_commit2);

    /* This commit's cache entry is no longer needed once it is processed. */
    g_hash_table_remove (file_info_cache, commit->commit_id);

    return ret;
}

static gboolean
path_exists_in_commit (SeafRepo *repo, const char *commit_id, const
                       char *path)
{
    SeafCommit *c = NULL;
    char *obj_id;
    guint32 mode;

    c = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                        repo->id, repo->version, commit_id);
    if (!c) {
        seaf_warning ("Failed to get commit %s:%.8s.\n", repo->id, commit_id);
        return FALSE;
    }

    obj_id = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,
                                             repo->store_id, repo->version,
                                             c->root_id, path, &mode, NULL);
    seaf_commit_unref (c);

    if (!obj_id)
        return FALSE;

    g_free (obj_id);
    return TRUE;
}

/* Check whether @path was created by a rename in @commit. On a rename,
 * set *old_path to the pre-rename path and *parent_id to the parent commit
 * that contains it, and return TRUE; otherwise return FALSE. */
static gboolean
detect_rename_revision (SeafRepo *repo,
                        SeafCommit *commit,
                        const char *path,
                        char **parent_id,
                        char **old_path)
{
    GList *diff_res = NULL;
    SeafCommit *p1 = NULL;
    int rc;
    gboolean is_renamed = FALSE;

    /* Diff entries carry paths without a leading '/'. */
    while (*path == '/' && *path != 0)
        ++path;

    if (!commit->second_parent_id) {
        p1 = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                             repo->id, repo->version,
                                             commit->parent_id);
        if (!p1) {
            seaf_warning ("Failed to get commit %s:%.8s.\n",
                          repo->id, commit->parent_id);
            return FALSE;
        }
        /* Don't fold diff results for directories. We need to know a file was
         * renamed when its parent folder was renamed.
         */
        rc = diff_commits (p1, commit, &diff_res, FALSE);
        seaf_commit_unref (p1);
        if (rc < 0) {
            seaf_warning ("Failed to diff.\n");
            return FALSE;
        }
    } else {
        rc = diff_merge (commit, &diff_res, FALSE);
        if (rc < 0) {
            seaf_warning ("Failed to diff merge.\n");
            return FALSE;
        }
    }

    GList *ptr;
    DiffEntry *de;
    for (ptr = diff_res; ptr; ptr = ptr->next) {
        de = ptr->data;
        if (de->status == DIFF_STATUS_RENAMED &&
            strcmp (de->new_name, path) == 0) {
            *old_path = g_strdup(de->name);
            is_renamed = TRUE;
            break;
        }
    }
    for (ptr = diff_res; ptr; ptr = ptr->next)
        diff_entry_free ((DiffEntry *)ptr->data);
    g_list_free (diff_res);

    if (!is_renamed)
        return FALSE;

    /* Determine parent commit containing the old path. */
    if (!commit->second_parent_id)
        *parent_id = g_strdup(commit->parent_id);
    else {
        if (path_exists_in_commit (repo, commit->parent_id, *old_path))
            *parent_id = g_strdup(commit->parent_id);
        else if (path_exists_in_commit (repo, commit->second_parent_id,
                                        *old_path))
            *parent_id = g_strdup(commit->second_parent_id);
        else {
            /* Old path exists in neither merge parent: not usable. */
            g_free (*old_path);
            *old_path = NULL;
            return FALSE;
        }
    }

    return TRUE;
}

/* Wrap an internal SeafCommit into the GObject exposed over RPC. */
static SeafileCommit *
convert_to_seafile_commit (SeafCommit *c)
{
    SeafileCommit *commit = seafile_commit_new ();
    g_object_set (commit,
                  "id", c->commit_id,
                  "creator_name", c->creator_name,
                  "creator", c->creator_id,
                  "desc", c->desc,
                  "ctime", c->ctime,
                  "repo_id", c->repo_id,
                  "root_id", c->root_id,
                  "parent_id", c->parent_id,
                  "second_parent_id", c->second_parent_id,
                  "version", c->version,
                  "new_merge", c->new_merge,
                  "conflict", c->conflict,
                  "device_name", c->device_name,
                  "client_version", c->client_version,
                  NULL);
    return commit;
}

/* Zip the three parallel lists (commits, file ids, file sizes) into a list
 * of SeafileCommit objects. The rename info is attached only to the last
 * (oldest) commit in the list. */
static GList *
convert_rpc_commit_list (GList *commit_list,
                         GList *file_id_list,
                         GList *file_size_list,
                         gboolean is_renamed,
                         const char *renamed_old_path)
{
    GList *ret = NULL;
    GList *ptr1, *ptr2, *ptr3;
    SeafCommit *c;
    char *file_id;
    gint64 *file_size;
    SeafileCommit *commit;

    for (ptr1 = commit_list, ptr2 = file_id_list, ptr3 = file_size_list;
         ptr1 && ptr2 && ptr3;
         ptr1 = ptr1->next, ptr2 = ptr2->next, ptr3 = ptr3->next) {
        c = ptr1->data;
        file_id = ptr2->data;
        file_size = ptr3->data;
        commit = convert_to_seafile_commit (c);
        g_object_set (commit, "rev_file_id", file_id,
                      "rev_file_size", *file_size, NULL);
        if (ptr1->next == NULL && is_renamed)
            g_object_set (commit, "rev_renamed_old_path", renamed_old_path,
                          NULL);
        ret = g_list_prepend (ret, commit);
    }

    ret = g_list_reverse (ret);
    return ret;
}

/* List the commits in which @path was modified, newest first, up to @limit
 * traversed commits. Continues on the next chunk line. */
GList *
seaf_repo_manager_list_file_revisions (SeafRepoManager *mgr,
                                       const char *repo_id,
                                       const char *start_commit_id,
                                       const char *path,
                                       int limit,
                                       gboolean got_latest,
                                       gboolean got_second,
                                       GError **error)
{
    SeafRepo *repo = NULL;
    GList *commit_list = NULL, *file_id_list =
                          NULL, *file_size_list = NULL;
    GList *ret = NULL, *ptr;
    CollectRevisionParam data = {0};
    SeafCommit *last_commit = NULL;
    const char *head_id;
    gboolean is_renamed = FALSE;
    char *parent_id = NULL, *old_path = NULL;
    char *next_start_commit= NULL;

    repo = seaf_repo_manager_get_repo (mgr, repo_id);
    if (!repo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "No such repo %s", repo_id);
        goto out;
    }

    data.repo = repo;

    /* NULL start_commit_id means "start paging from the current head". */
    if (!start_commit_id)
        head_id = repo->head->commit_id;
    else
        head_id = start_commit_id;

    data.path = path;
    data.error = error;

    data.truncate_time = seaf_repo_manager_get_repo_truncate_time (mgr,
                                                                   repo_id);

    data.wanted_commits = NULL;
    data.file_id_list = NULL;
    data.file_size_list = NULL;

    data.got_latest = got_latest;
    data.got_second = got_second;
    data.not_found_file = FALSE;

    /* A hash table to cache caculated file info of in */
    data.file_info_cache = g_hash_table_new_full (g_str_hash, g_str_equal,
                                                  g_free, free_file_info);

    if (!seaf_commit_manager_traverse_commit_tree_with_limit (seaf->commit_mgr,
                                                              repo->id,
                                                              repo->version,
                                                              head_id,
                                                              (CommitTraverseFunc)collect_file_revisions,
                                                              limit, &data,
                                                              &next_start_commit,
                                                              TRUE)) {
        g_clear_error (error);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "failed to traverse commit of repo %s", repo_id);
        goto out;
    }

    if (data.wanted_commits) {
        /* The oldest collected revision may be the result of a rename;
         * if so, continue paging from the commit holding the old path. */
        last_commit = data.wanted_commits->data;
        is_renamed = detect_rename_revision (repo, last_commit, path,
                                             &parent_id, &old_path);

        if (data.not_found_file && !is_renamed) {
            // reached file initial commit.
            g_free (next_start_commit);
            next_start_commit = NULL;
        } else if (is_renamed){
            // file renamed.
            g_free (next_start_commit);
            next_start_commit = g_strdup (parent_id);
        }

        commit_list = g_list_reverse (data.wanted_commits);
        file_id_list = g_list_reverse (data.file_id_list);
        file_size_list = g_list_reverse (data.file_size_list);

        /* Report the old path with a leading '/', as callers expect. */
        char *rename_path = NULL;
        if (old_path && *old_path != '/')
            rename_path = g_strconcat ("/", old_path, NULL);
        else
            rename_path = g_strdup (old_path);

        ret = convert_rpc_commit_list (commit_list, file_id_list,
                                       file_size_list,
                                       is_renamed, rename_path);
        g_free (rename_path);
    } else {
        if (data.not_found_file) {
            g_free (next_start_commit);
            next_start_commit = NULL;
        }
    }

    /* Append one commit that only contains 'next_start_commit' */
    SeafileCommit *commit = seafile_commit_new ();
    g_object_set (commit, "next_start_commit", next_start_commit, NULL);
    ret = g_list_append (ret, commit);

out:
    if (repo)
        seaf_repo_unref (repo);
    for (ptr = commit_list; ptr; ptr = ptr->next)
        seaf_commit_unref ((SeafCommit *)ptr->data);
    g_list_free (commit_list);
    string_list_free (file_id_list);
    for (ptr = file_size_list; ptr; ptr = ptr->next)
        g_free (ptr->data);
    g_list_free (file_size_list);
    if (data.file_info_cache)
        g_hash_table_destroy (data.file_info_cache);
    g_free (old_path);
    g_free (parent_id);
    g_free (next_start_commit);

    return ret;
}

typedef struct CalcFilesLastModifiedParam CalcFilesLastModifiedParam;

/* State threaded through collect_files_last_modified. */
struct CalcFilesLastModifiedParam {
    SeafRepo *repo;
    GError **error;
    const char *parent_dir;
    GHashTable *last_modified_hash;
    GHashTable *current_file_id_hash;
    SeafCommit *current_commit;
};

/* GHRFunc: remove files whose recorded timestamp was not refreshed by the
 * commit currently being visited. */
static gboolean
check_non_existing_files (void *key, void *value, void *vdata)
{
    CalcFilesLastModifiedParam *data = vdata;
    gboolean remove = FALSE;

    char *file_name = key;
    gint64 *ctime = g_hash_table_lookup (data->last_modified_hash, file_name);
    if (!ctime) {
        /* Impossible */
        remove = TRUE;
    } else if (*ctime != data->current_commit->ctime) {
        /* This file does not exist in this commit. So it's last modified in
         * the previous commit.
         */
        remove = TRUE;
    }

    return remove;
}

/* CommitTraverseFunc: push each still-tracked file's last-modified time
 * back in history while its id matches the current head's id. */
static gboolean
collect_files_last_modified (SeafCommit *commit, void *vdata, gboolean *stop)
{
    CalcFilesLastModifiedParam *data = vdata;
    GError **error = data->error;
    SeafDirent *dent = NULL;
    char *file_id = NULL;
    SeafDir *dir = NULL;
    GList *ptr;
    gboolean ret = TRUE;

    data->current_commit = commit;
    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,
                                               data->repo->store_id,
                                               data->repo->version,
                                               commit->root_id,
                                               data->parent_dir,
                                               error);
    if (*error) {
        if (!g_error_matches(*error, SEAFILE_DOMAIN, SEAF_ERR_PATH_NO_EXIST)) {
            *stop = TRUE;
            ret = FALSE;
            goto out;
        } else {
            g_clear_error (error);
        }
    }

    if (!dir) {
        /* The directory does not exist in this commit. So all files are last
         * modified in the previous commit;
         */
        *stop = TRUE;
        goto out;
    }

    for (ptr = dir->entries; ptr; ptr = ptr->next) {
        dent = ptr->data;
        file_id = g_hash_table_lookup (data->current_file_id_hash,
                                       dent->name);
        if (file_id) {
            if (strcmp(file_id, dent->id) != 0) {
                /* Content changed at this commit: the file's timestamp is
                 * final; stop tracking it. */
                g_hash_table_remove (data->current_file_id_hash, dent->name);
            } else {
                /* Same content: push the candidate timestamp further back. */
                gint64 *ctime = g_new (gint64, 1);
                *ctime = commit->ctime;
                g_hash_table_replace (data->last_modified_hash,
                                      g_strdup(dent->name), ctime);
            }
        }

        if (g_hash_table_size(data->current_file_id_hash) == 0) {
            *stop = TRUE;
            goto out;
        }
    }

    /* Files not found in the current commit are last modified in the previous
     * commit */
    g_hash_table_foreach_remove (data->current_file_id_hash,
                                 check_non_existing_files, data);

    if (g_hash_table_size(data->current_file_id_hash) == 0) {
        /* All files under this diretory have been calculated */
        *stop = TRUE;
        goto out;
    }

out:
    seaf_dir_free (dir);

    return ret;
}

/**
 * Give a directory, return the last modification timestamps of all the files
 * under this directory.
 *
 * First we record the current id of every file, then traverse the commit
 * tree. Give a commit, for each file, if the file id in that commit is
 * different than its current id, then this file is last modified in the
 * commit previous to that commit.
 */
GList *
seaf_repo_manager_calc_files_last_modified (SeafRepoManager *mgr,
                                            const char *repo_id,
                                            const char *parent_dir,
                                            int limit,
                                            GError **error)
{
    SeafRepo *repo = NULL;
    SeafCommit *head_commit = NULL;
    SeafDir *dir = NULL;
    GList *ptr = NULL;
    SeafDirent *dent = NULL;
    CalcFilesLastModifiedParam data = {0};
    GList *ret_list = NULL;

    repo = seaf_repo_manager_get_repo (mgr, repo_id);
    if (!repo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "No such repo %s", repo_id);
        goto out;
    }

    head_commit = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                                  repo->id, repo->version,
                                                  repo->head->commit_id);
    if (!head_commit) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Failed to get commit %s", repo->head->commit_id);
        goto out;
    }

    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,
                                               repo->store_id, repo->version,
                                               head_commit->root_id,
                                               parent_dir, error);
    if (*error || !dir) {
        goto out;
    }

    data.repo = repo;

    /* A hash table of pattern (file_name, current_file_id) */
    data.current_file_id_hash = g_hash_table_new_full (g_str_hash,
                                                       g_str_equal,
                                                       g_free, g_free);

    /* A (file_name, last_modified) hashtable. last_modified
       is a heap allocated gint64 */
    data.last_modified_hash = g_hash_table_new_full (g_str_hash, g_str_equal,
                                                     g_free, g_free);

    /* Seed both tables from the head commit's view of the directory. */
    for (ptr = dir->entries; ptr; ptr = ptr->next) {
        dent = ptr->data;
        g_hash_table_insert (data.current_file_id_hash,
                             g_strdup(dent->name),
                             g_strdup(dent->id));

        gint64 *ctime = g_new (gint64, 1);
        *ctime = head_commit->ctime;
        g_hash_table_insert (data.last_modified_hash,
                             g_strdup(dent->name),
                             ctime);
    }

    if (g_hash_table_size (data.current_file_id_hash) == 0) {
        /* An empty directory, no need to traverse */
        goto out;
    }

    data.parent_dir = parent_dir;
    data.error = error;

    if (!seaf_commit_manager_traverse_commit_tree_with_limit (seaf->commit_mgr,
                                                              repo->id,
                                                              repo->version,
                                                              repo->head->commit_id,
                                                              (CommitTraverseFunc)collect_files_last_modified,
                                                              limit, &data,
                                                              NULL, FALSE)) {
        if (*error)
            seaf_warning ("error when traversing commits: %s\n",
                          (*error)->message);
        else
            seaf_warning ("error when traversing commits.\n");
        g_clear_error (error);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "failed to traverse commit of repo %s", repo_id);
        goto out;
    }

    GHashTableIter iter;
    gpointer key, value;
    g_hash_table_iter_init (&iter, data.last_modified_hash);
    while (g_hash_table_iter_next (&iter, &key, &value)) {
        SeafileFileLastModifiedInfo *info;
        gint64 last_modified = *(gint64 *)value;
        info = g_object_new (SEAFILE_TYPE_FILE_LAST_MODIFIED_INFO,
                             "file_name", key,
                             "last_modified", last_modified,
                             NULL);
        ret_list = g_list_prepend (ret_list, info);
    }

out:
    if (repo)
        seaf_repo_unref (repo);
    if (head_commit)
        seaf_commit_unref(head_commit);
    if (data.last_modified_hash)
        g_hash_table_destroy (data.last_modified_hash);
    if (data.current_file_id_hash)
        g_hash_table_destroy (data.current_file_id_hash);
    if (dir)
        seaf_dir_free (dir);

    return g_list_reverse(ret_list);
}

/* Reset the repo head to the state of @commit_id by creating a new commit
 * with that commit's root; retries on concurrent branch update. Continues
 * on the next chunk line. */
int
seaf_repo_manager_revert_on_server (SeafRepoManager *mgr,
                                    const char *repo_id,
                                    const char *commit_id,
                                    const char *user_name,
                                    GError **error)
{
    SeafRepo *repo;
    SeafCommit *commit = NULL, *new_commit = NULL;
    char desc[512];
/* Continuation of seaf_repo_manager_revert_on_server() (signature and local
 * declarations appear just above).  Creates a new head commit whose root is
 * taken from an older commit, effectively reverting the repo.  Uses an
 * optimistic-concurrency loop: if the branch moved under us, retry. */
    int ret = 0;

retry:
    repo = seaf_repo_manager_get_repo (mgr, repo_id);
    if (!repo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "No such repo");
        return -1;
    }

    commit = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                             repo->id, repo->version,
                                             commit_id);
    if (!commit) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Commit doesn't exist");
        ret = -1;
        goto out;
    }

    /* Build a human-readable description carrying the reverted-to timestamp.
     * %F/%T are not supported by the Windows strftime, hence the ifdef. */
#ifndef WIN32
    strftime (desc, sizeof(desc), "Reverted repo to status at %F %T.",
              localtime((time_t *)(&commit->ctime)));
#else
    strftime (desc, sizeof(desc), "Reverted repo to status at %Y-%m-%d %H:%M:%S.",
              localtime((time_t *)(&commit->ctime)));
#endif

    /* New commit reuses the old commit's root (the reverted tree) but is
     * parented on the current head, so history is preserved. */
    new_commit = seaf_commit_new (NULL, repo->id, commit->root_id,
                                  user_name, EMPTY_SHA1, desc, 0);
    new_commit->parent_id = g_strdup (repo->head->commit_id);
    seaf_repo_to_commit (repo, new_commit);

    if (seaf_commit_manager_add_commit (seaf->commit_mgr, new_commit) < 0) {
        ret = -1;
        goto out;
    }

    seaf_branch_set_commit (repo->head, new_commit->commit_id);
    if (seaf_branch_manager_test_and_update_branch (seaf->branch_mgr,
                                                    repo->head,
                                                    new_commit->parent_id,
                                                    FALSE, NULL, NULL, NULL) < 0) {
        /* Branch moved concurrently: drop all refs and start over. */
        seaf_repo_unref (repo);
        seaf_commit_unref (commit);
        seaf_commit_unref (new_commit);
        repo = NULL;
        commit = new_commit = NULL;
        goto retry;
    }

    seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL);

out:
    if (new_commit)
        seaf_commit_unref (new_commit);
    if (commit)
        seaf_commit_unref (commit);
    if (repo)
        seaf_repo_unref (repo);

    if (ret == 0) {
        update_repo_size (repo_id);
    }

    return ret;
}

/* Record one deleted dirent under base/name into @entries (path -> entry).
 * Duplicate paths are ignored (first deletion seen wins).  For regular files
 * the file size is looked up and attached; if the file object is missing the
 * entry is silently dropped.  The hash table takes ownership of @path and
 * the new entry. */
static void
add_deleted_entry (SeafRepo *repo,
                   GHashTable *entries,
                   SeafDirent *dent,
                   const char *base,
                   SeafCommit *child,
                   SeafCommit *parent)
{
    char *path = g_strconcat (base, dent->name, NULL);
    SeafileDeletedEntry *entry;
    Seafile *file;

    if (g_hash_table_lookup (entries, path) != NULL) {
        /* g_debug ("found dup deleted entry for %s.\n", path); */
        g_free (path);
        return;
    }

    /* g_debug ("Add deleted entry for %s.\n", path); */

    entry = g_object_new (SEAFILE_TYPE_DELETED_ENTRY,
                          "commit_id", parent->commit_id,
                          "obj_id", dent->id,
                          "obj_name", dent->name,
                          "basedir", base,
                          "mode", dent->mode,
                          "delete_time", child->ctime,
                          NULL);

    if (S_ISREG(dent->mode)) {
        file = seaf_fs_manager_get_seafile (seaf->fs_mgr,
                                            repo->store_id, repo->version,
                                            dent->id);
        if (!file) {
            g_free (path);
            g_object_unref (entry);
            return;
        }
        g_object_set (entry, "file_size", file->file_size, NULL);
        seafile_unref (file);
    }

    g_hash_table_insert (entries, path, entry);
}

/* Walk two sorted directory trees (d1 = child/newer, d2 = parent/older) in
 * lockstep and record entries present in d2 but gone (or type-changed) in d1.
 * Recurses into directories that exist on both sides with differing ids.
 * Returns 0 on success, -1 if an fs object cannot be loaded. */
static int
find_deleted_recursive (SeafRepo *repo,
                        SeafDir *d1,
                        SeafDir *d2,
                        const char *base,
                        SeafCommit *child,
                        SeafCommit *parent,
                        GHashTable *entries)
{
    GList *p1, *p2;
    SeafDirent *dent1, *dent2;
    int res, ret = 0;

    p1 = d1->entries;
    p2 = d2->entries;

    /* Since dirents are sorted in descending order, we can use merge
     * algorithm to find out deleted entries.
     * Deleted entries are those:
     * 1. exists in d2 but absent in d1.
     * 2. exists in both d1 and d2 but with different type.
     */
    while (p1 && p2) {
        dent1 = p1->data;
        dent2 = p2->data;
        res = g_strcmp0 (dent1->name, dent2->name);
        if (res < 0) {
            /* exists in d2 but absent in d1. */
            add_deleted_entry (repo, entries, dent2, base, child, parent);
            p2 = p2->next;
        } else if (res == 0) {
            if ((dent1->mode & S_IFMT) != (dent2->mode & S_IFMT)) {
                /* both exist but with a different type.
 */
                add_deleted_entry (repo, entries, dent2, base, child, parent);
            } else if (S_ISDIR(dent1->mode) && strcmp(dent1->id, dent2->id) != 0) {
                /* Same-named dir on both sides but with different content:
                 * recurse to find deletions inside it. */
                SeafDir *n1 = seaf_fs_manager_get_seafdir_sorted (seaf->fs_mgr,
                                                                  repo->store_id,
                                                                  repo->version,
                                                                  dent1->id);
                if (!n1) {
                    seaf_warning ("Failed to find dir %s:%s.\n", repo->id, dent1->id);
                    return -1;
                }
                SeafDir *n2 = seaf_fs_manager_get_seafdir_sorted (seaf->fs_mgr,
                                                                  repo->store_id,
                                                                  repo->version,
                                                                  dent2->id);
                if (!n2) {
                    seaf_warning ("Failed to find dir %s:%s.\n", repo->id, dent2->id);
                    seaf_dir_free (n1);
                    return -1;
                }
                char *new_base = g_strconcat (base, dent1->name, "/", NULL);
                ret = find_deleted_recursive (repo, n1, n2, new_base,
                                              child, parent, entries);
                g_free (new_base);
                seaf_dir_free (n1);
                seaf_dir_free (n2);
                if (ret < 0)
                    return ret;
            }
            p1 = p1->next;
            p2 = p2->next;
        } else {
            p1 = p1->next;
        }
    }

    /* Everything left over in d2 has no counterpart in d1: all deleted. */
    for ( ; p2 != NULL; p2 = p2->next) {
        dent2 = p2->data;
        add_deleted_entry (repo, entries, dent2, base, child, parent);
    }

    return ret;
}

/* Diff the @base subtree of @child against @parent and collect deletions
 * into @entries.  Missing directories on either side are treated as
 * "nothing to report" (returns 0), not as an error. */
static int
find_deleted (SeafRepo *repo,
              SeafCommit *child,
              SeafCommit *parent,
              const char *base,
              GHashTable *entries)
{
    SeafDir *d1, *d2;
    int ret = 0;

    d1 = seaf_fs_manager_get_seafdir_sorted_by_path (seaf->fs_mgr,
                                                     repo->store_id, repo->version,
                                                     child->root_id, base);
    if (!d1) {
        return ret;
    }

    d2 = seaf_fs_manager_get_seafdir_sorted_by_path (seaf->fs_mgr,
                                                     repo->store_id, repo->version,
                                                     parent->root_id, base);
    if (!d2) {
        seaf_dir_free (d1);
        return ret;
    }

    ret = find_deleted_recursive (repo, d1, d2, base, child, parent, entries);

    seaf_dir_free (d2);
    seaf_dir_free (d1);

    return ret;
}

/* Shared state for the commit-traversal callback collect_deleted(). */
typedef struct CollectDelData {
    SeafRepo *repo;
    GHashTable *entries;        /* path -> SeafileDeletedEntry, accumulated */
    gint64 truncate_time;       /* don't look at commits at/before this time */
    char *path;                 /* subtree to scan, always ends with '/' */
} CollectDelData;

#define DEFAULT_RECYCLE_DAYS 7

/* CommitTraverseFunc: for one commit, diff it against its parent(s) and
 * collect deletions.  Sets *stop once commits get older than the truncate
 * time.  Returns FALSE only on hard errors (missing commit / fs object). */
static gboolean
collect_deleted (SeafCommit *commit, void *vdata, gboolean *stop)
{
    CollectDelData *data = vdata;
    SeafRepo *repo = data->repo;
    GHashTable *entries = data->entries;
    gint64 truncate_time = data->truncate_time;
    SeafCommit *p1, *p2;

    /* We use <= here. This is for handling clean trash and history.
     * If the user cleans all history, truncate time will be equal to
     * the head commit's ctime. In such case, we don't actually want to display
     * any deleted file.
     */
    if ((gint64)(commit->ctime) <= truncate_time) {
        *stop = TRUE;
        return TRUE;
    }

    /* Root commit: nothing to diff against. */
    if (commit->parent_id == NULL)
        return TRUE;

    /* Cheap pre-filter: only commits whose description mentions a deletion
     * can contribute deleted entries. */
    if (!(strstr (commit->desc, PREFIX_DEL_FILE) != NULL ||
          strstr (commit->desc, PREFIX_DEL_DIR) != NULL ||
          strstr (commit->desc, PREFIX_DEL_DIRS) != NULL)) {
        return TRUE;
    }

    p1 = seaf_commit_manager_get_commit (commit->manager,
                                         repo->id, repo->version,
                                         commit->parent_id);
    if (!p1) {
        seaf_warning ("Failed to find commit %s:%s.\n", repo->id, commit->parent_id);
        return FALSE;
    }

    if (find_deleted (data->repo, commit, p1, data->path, entries) < 0) {
        seaf_commit_unref (p1);
        return FALSE;
    }
    seaf_commit_unref (p1);

    /* Merge commit: also diff against the second parent. */
    if (commit->second_parent_id) {
        p2 = seaf_commit_manager_get_commit (commit->manager,
                                             repo->id, repo->version,
                                             commit->second_parent_id);
        if (!p2) {
            seaf_warning ("Failed to find commit %s:%s.\n",
                          repo->id, commit->second_parent_id);
            return FALSE;
        }
        if (find_deleted (data->repo, commit, p2, data->path, entries) < 0) {
            seaf_commit_unref (p2);
            return FALSE;
        }
        seaf_commit_unref (p2);
    }

    return TRUE;
}

/* Parameters for the remove_existing() foreach-remove callback. */
typedef struct RemoveExistingParam {
    SeafRepo *repo;
    SeafCommit *head;
} RemoveExistingParam;

/* GHRFunc: return TRUE (remove the deleted-entry) when the same path still
 * exists in the head commit with the same file type — i.e. the file was
 * added back after deletion. */
static gboolean
remove_existing (gpointer key, gpointer value, gpointer user_data)
{
    SeafileDeletedEntry *e = value;
    RemoveExistingParam *param = user_data;
    SeafRepo *repo = param->repo;
    SeafCommit *head = param->head;
    guint32 mode = seafile_deleted_entry_get_mode(e), mode_out = 0;
    char *path = key;

    char *obj_id = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,
                                                   repo->store_id, repo->version,
                                                   head->root_id, path,
                                                   &mode_out, NULL);
    if (obj_id == NULL)
        return FALSE;
    g_free (obj_id);

    /* If path exists in head commit and with the same type,
     * remove it from deleted entries.
*/ if ((mode & S_IFMT) == (mode_out & S_IFMT)) { /* g_debug ("%s exists in head commit.\n", path); */ return TRUE; } return FALSE; } static int filter_out_existing_entries (GHashTable *entries, SeafRepo *repo, const char *head_id) { SeafCommit *head; RemoveExistingParam param; head = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id, repo->version, head_id); if (!head) { seaf_warning ("Failed to find head commit %s of repo %s.\n", head_id, repo->id); return -1; } param.repo = repo; param.head = head; g_hash_table_foreach_remove (entries, remove_existing, ¶m); seaf_commit_unref (head); return 0; } static gboolean hash_to_list (gpointer key, gpointer value, gpointer user_data) { GList **plist = (GList **)user_data; g_free (key); *plist = g_list_prepend (*plist, value); return TRUE; } static gint compare_commit_by_time_ex (gconstpointer a, gconstpointer b) { const SeafCommit *commit_a = a; const SeafCommit *commit_b = b; /* Latest commit comes first in the list. */ return (commit_b->ctime - commit_a->ctime); } static gint compare_commit_by_time (gconstpointer a, gconstpointer b, gpointer unused) { return compare_commit_by_time_ex(a, b); } static int insert_parent_commit (GList **list, GHashTable *hash, const char *repo_id, int version, const char *parent_id) { SeafCommit *p; char *key; if (g_hash_table_lookup (hash, parent_id) != NULL) return 0; p = seaf_commit_manager_get_commit (seaf->commit_mgr, repo_id, version, parent_id); if (!p) { seaf_warning ("Failed to find commit %s\n", parent_id); return -1; } *list = g_list_insert_sorted_with_data (*list, p, compare_commit_by_time, NULL); key = g_strdup (parent_id); g_hash_table_replace (hash, key, key); return 0; } static GList * scan_stat_to_list(const char *scan_stat, GHashTable *commit_hash, SeafRepo *repo) { json_t *commit_array = NULL, *commit_obj = NULL; char *commit_id = NULL; SeafCommit *commit = NULL; GList *list = NULL; char *key; commit_array = json_loadb (scan_stat, strlen(scan_stat), 0, NULL); if 
(!commit_array) { return NULL; } int i; for (i = 0; i < json_array_size (commit_array); i++) { commit_obj = json_array_get (commit_array, i); commit_id = json_string_value (commit_obj); if (commit_id && strlen(commit_id) == 40) { commit = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id, repo->version, commit_id); if (!commit) { return NULL; } list = g_list_prepend (list, commit); key = g_strdup (commit->commit_id); g_hash_table_replace (commit_hash, key, key); } } json_decref (commit_array); list = g_list_sort (list, compare_commit_by_time_ex); return list; } static int scan_commits_for_collect_deleted (CollectDelData *data, const char *prev_scan_stat, int limit, char **next_scan_stat) { GList *list = NULL; SeafCommit *commit; GHashTable *commit_hash; SeafRepo *repo = data->repo; int scan_num = 0; gboolean ret = TRUE; /* A hash table for recording id of traversed commits. */ commit_hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); if (prev_scan_stat == NULL) { commit = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id, repo->version, repo->head->commit_id); if (!commit) { ret = FALSE; goto out; } list = g_list_prepend (list, commit); char *key = g_strdup (commit->commit_id); g_hash_table_replace (commit_hash, key, key); } else { list = scan_stat_to_list (prev_scan_stat, commit_hash, repo); if (list == NULL) { ret = FALSE; goto out; } } while (list) { gboolean stop = FALSE; commit = list->data; list = g_list_delete_link (list, list); if (!collect_deleted (commit, data, &stop)) { seaf_warning("[comit-mgr] CommitTraverseFunc failed\n"); seaf_commit_unref (commit); ret = FALSE; goto out; } if (stop) { seaf_commit_unref (commit); /* stop traverse down from this commit, * but not stop traversing the tree */ continue; } if (commit->parent_id) { if (insert_parent_commit (&list, commit_hash, repo->id, repo->version, commit->parent_id) < 0) { seaf_warning("[comit-mgr] insert parent commit failed\n"); seaf_commit_unref (commit); ret = 
FALSE; goto out; } } if (commit->second_parent_id) { if (insert_parent_commit (&list, commit_hash, repo->id, repo->version, commit->second_parent_id) < 0) { seaf_warning("[comit-mgr]insert second parent commit failed\n"); seaf_commit_unref (commit); ret = FALSE; goto out; } } seaf_commit_unref (commit); if (++scan_num >= limit) { break; } } json_t *commit_array = json_array (); while (list) { commit = list->data; json_array_append_new (commit_array, json_string (commit->commit_id)); seaf_commit_unref (commit); list = g_list_delete_link (list, list); } if (json_array_size(commit_array) > 0) { char *commits = json_dumps (commit_array, JSON_COMPACT); *next_scan_stat = commits; } json_decref (commit_array); g_hash_table_destroy (commit_hash); return ret; out: g_hash_table_destroy (commit_hash); while (list) { commit = list->data; seaf_commit_unref (commit); list = g_list_delete_link (list, list); } return ret; } GList * seaf_repo_manager_get_deleted_entries (SeafRepoManager *mgr, const char *repo_id, int show_days, const char *path, const char *scan_stat, int limit, GError **error) { SeafRepo *repo; gint64 truncate_time, show_time; GList *ret = NULL; char *next_scan_stat = NULL; truncate_time = seaf_repo_manager_get_repo_truncate_time (mgr, repo_id); if (truncate_time == 0) { // Don't keep history, set scan_stat as NULL, indicate no need for next scan ret = g_list_append (ret, g_object_new (SEAFILE_TYPE_DELETED_ENTRY, "scan_stat", NULL, NULL)); return ret; } if (show_days <= 0) show_time = -1; else show_time = (gint64)time(NULL) - show_days * 24 * 3600; repo = seaf_repo_manager_get_repo (mgr, repo_id); if (!repo) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Invalid repo id"); return NULL; } CollectDelData data = {0}; GHashTable *entries = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, g_object_unref); data.repo = repo; data.entries = entries; data.truncate_time = MAX (show_time, truncate_time); if (path) { if (path[strlen(path) - 1] == '/') { 
data.path = g_strdup (path); } else { data.path = g_strconcat (path, "/", NULL); } } else { data.path = g_strdup ("/"); } if (!scan_commits_for_collect_deleted (&data, scan_stat, limit, &next_scan_stat)) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, "Internal error"); g_hash_table_destroy (entries); seaf_repo_unref (repo); g_free (data.path); g_free (next_scan_stat); return NULL; } /* Remove entries exist in the current commit. * This is necessary because some files may be added back after deletion. */ if (filter_out_existing_entries (entries, repo, repo->head->commit_id) == 0) { // filter success, then add collected result to list g_hash_table_foreach_steal (entries, hash_to_list, &ret); } // Append scan_stat entry to the end to indicate the end of scan result ret = g_list_append (ret, g_object_new (SEAFILE_TYPE_DELETED_ENTRY, "scan_stat", next_scan_stat, NULL)); g_hash_table_destroy (entries); seaf_repo_unref (repo); g_free (data.path); g_free (next_scan_stat); return ret; } static SeafCommit * get_commit(SeafRepo *repo, const char *branch_or_commit) { SeafBranch *b; SeafCommit *c; b = seaf_branch_manager_get_branch (seaf->branch_mgr, repo->id, branch_or_commit); if (!b) { if (strcmp(branch_or_commit, "HEAD") == 0) c = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id, repo->version, repo->head->commit_id); else c = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id, repo->version, branch_or_commit); } else { c = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id, repo->version, b->commit_id); } if (b) seaf_branch_unref (b); return c; } GList * seaf_repo_diff (SeafRepo *repo, const char *old, const char *new, int fold_dir_results, char **error) { SeafCommit *c1 = NULL, *c2 = NULL; int ret = 0; GList *diff_entries = NULL; g_return_val_if_fail (*error == NULL, NULL); c2 = get_commit (repo, new); if (!c2) { *error = g_strdup("Can't find new commit"); return NULL; } if (old == NULL || old[0] == '\0') { if (c2->parent_id && 
c2->second_parent_id) { ret = diff_merge (c2, &diff_entries, fold_dir_results); seaf_commit_unref (c2); if (ret < 0) { *error = g_strdup("Failed to do diff"); g_list_free_full (diff_entries, (GDestroyNotify)diff_entry_free); return NULL; } return diff_entries; } if (!c2->parent_id) { seaf_commit_unref (c2); return NULL; } c1 = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id, repo->version, c2->parent_id); } else { c1 = get_commit (repo, old); } if (!c1) { *error = g_strdup("Can't find old commit"); seaf_commit_unref (c2); return NULL; } /* do diff */ ret = diff_commits (c1, c2, &diff_entries, fold_dir_results); if (ret < 0) { g_list_free_full (diff_entries, (GDestroyNotify)diff_entry_free); diff_entries = NULL; *error = g_strdup("Failed to do diff"); } seaf_commit_unref (c1); seaf_commit_unref (c2); return diff_entries; } ================================================ FILE: server/repo-perm.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include "common.h" #include "utils.h" #include "log.h" #include "seafile-session.h" #include "repo-mgr.h" #include "seafile-error.h" #include "seaf-utils.h" /* * Permission priority: owner --> personal share --> group share --> public. * Permission with higher priority overwrites those with lower priority. */ static gboolean check_repo_share_perm_cb (SeafDBRow *row, void *data) { char **orig_perm = data; char *perm = g_strdup (seaf_db_row_get_column_text (row, 0)); if (g_strcmp0(perm, "rw") == 0) { g_free (*orig_perm); *orig_perm = perm; return FALSE; } else if (g_strcmp0(perm, "r") == 0 && !(*orig_perm)) { *orig_perm = perm; return TRUE; } g_free (perm); return TRUE; } static char * check_group_permission_by_user (SeafRepoManager *mgr, const char *repo_id, const char *user_name) { char *permission = NULL; GList *groups = NULL, *p1; CcnetGroup *group; int group_id; GString *sql; /* Get the groups this user belongs to. 
*/ groups = ccnet_group_manager_get_groups_by_user (seaf->group_mgr, user_name, 1, NULL); if (!groups) { goto out; } sql = g_string_new (""); g_string_printf (sql, "SELECT permission FROM RepoGroup WHERE repo_id = ? AND group_id IN ("); for (p1 = groups; p1 != NULL; p1 = p1->next) { group = p1->data; g_object_get (group, "id", &group_id, NULL); g_string_append_printf (sql, "%d", group_id); if (p1->next) g_string_append_printf (sql, ","); } g_string_append_printf (sql, ")"); if (seaf_db_statement_foreach_row (mgr->seaf->db, sql->str, check_repo_share_perm_cb, &permission, 1, "string", repo_id) < 0) { seaf_warning ("DB error when get repo share permission for repo %s.\n", repo_id); } g_string_free (sql, TRUE); out: for (p1 = groups; p1 != NULL; p1 = p1->next) g_object_unref ((GObject *)p1->data); g_list_free (groups); return permission; } static char * check_repo_share_permission (SeafRepoManager *mgr, const char *repo_id, const char *user_name) { char *permission; permission = seaf_share_manager_check_permission (seaf->share_mgr, repo_id, user_name); if (permission != NULL) return permission; permission = check_group_permission_by_user (mgr, repo_id, user_name); if (permission != NULL) return permission; if (!mgr->seaf->cloud_mode) return seaf_repo_manager_get_inner_pub_repo_perm (mgr, repo_id); return NULL; } // get dir perm from all dir perms in parent repo // such as path /a/b, then check /a/b, /a in parent static char * get_dir_perm (GHashTable *perms, const char *path) { char *tmp = g_strdup (path); char *slash; char *perm = NULL; while (g_strcmp0 (tmp, "") != 0) { perm = g_hash_table_lookup (perms, tmp); if (perm) break; slash = g_strrstr (tmp, "/"); *slash = '\0'; } g_free (tmp); return g_strdup (perm); } static char * check_perm_on_parent_repo (const char *origin_repo_id, const char *user, const char *vpath) { GHashTable *user_perms = NULL; GHashTable *group_perms = NULL; GList *groups = NULL; GList *iter; char *perm = NULL; user_perms = 
        seaf_share_manager_get_shared_dirs_to_user (seaf->share_mgr,
                                                    origin_repo_id, user);
    if (!user_perms) {
        return NULL;
    }

    /* Direct user shares take priority over group shares. */
    if (g_hash_table_size (user_perms) > 0) {
        perm = get_dir_perm (user_perms, vpath);
        if (perm) {
            g_hash_table_destroy (user_perms);
            return perm;
        }
    }
    g_hash_table_destroy (user_perms);

    groups = ccnet_group_manager_get_groups_by_user (seaf->group_mgr,
                                                     user, 1, NULL);
    if (!groups) {
        return NULL;
    }

    group_perms = seaf_share_manager_get_shared_dirs_to_group (seaf->share_mgr,
                                                               origin_repo_id,
                                                               groups);
    for (iter = groups; iter; iter = iter->next)
        g_object_unref ((GObject *)iter->data);
    g_list_free (groups);
    if (!group_perms) {
        return NULL;
    }

    if (g_hash_table_size (group_perms) > 0) {
        perm = get_dir_perm (group_perms, vpath);
    }
    g_hash_table_destroy (group_perms);

    return perm;
}

/* Permission of @user on a virtual (sub-folder) repo: origin-repo owner
 * gets "rw"; otherwise shared sub-folder permission beats the top-level
 * repo's share permission. */
static char *
check_virtual_repo_permission (SeafRepoManager *mgr,
                               const char *repo_id,
                               const char *origin_repo_id,
                               const char *user,
                               const char *vpath)
{
    char *owner = NULL;
    char *permission = NULL;

    /* If I'm the owner of origin repo, I have full access to sub-repos. */
    owner = seaf_repo_manager_get_repo_owner (mgr, origin_repo_id);
    if (g_strcmp0 (user, owner) == 0) {
        g_free (owner);
        permission = g_strdup("rw");
        return permission;
    }
    g_free (owner);

    /* If I'm not the owner of origin repo, this sub-repo can be created
     * from a shared repo by me or directly shared by others to me.
     * The priority of shared sub-folder is higher than top-level repo.
     */
    permission = check_perm_on_parent_repo (origin_repo_id, user, vpath);
    if (permission) {
        return permission;
    }

    permission = check_repo_share_permission (mgr, origin_repo_id, user);

    return permission;
}

/*
 * Comprehensive repo access permission checker.
 *
 * Returns read/write permission.
 */
char *
seaf_repo_manager_check_permission (SeafRepoManager *mgr,
                                    const char *repo_id,
                                    const char *user,
                                    GError **error)
{
    SeafVirtRepo *vinfo;
    char *owner = NULL;
    char *permission = NULL;

    /* This is a virtual repo.*/
    vinfo = seaf_repo_manager_get_virtual_repo_info (mgr, repo_id);
    if (vinfo) {
        permission = check_virtual_repo_permission (mgr, repo_id,
                                                    vinfo->origin_repo_id,
                                                    user, vinfo->path);
        goto out;
    }

    /* Regular repo: owner gets "rw", everyone else goes through the
     * share-permission chain. */
    owner = seaf_repo_manager_get_repo_owner (mgr, repo_id);
    if (owner != NULL) {
        if (strcmp (owner, user) == 0)
            permission = g_strdup("rw");
        else
            permission = check_repo_share_permission (mgr, repo_id, user);
    }

out:
    seaf_virtual_repo_info_free (vinfo);
    g_free (owner);
    return permission;
}

/*
 * Directories are always before files. Otherwise compare the names.
 */
static gint
comp_dirent_func (gconstpointer a, gconstpointer b)
{
    const SeafDirent *dent_a = a, *dent_b = b;

    if (S_ISDIR(dent_a->mode) && S_ISREG(dent_b->mode))
        return -1;
    if (S_ISREG(dent_a->mode) && S_ISDIR(dent_b->mode))
        return 1;
    return strcasecmp (dent_a->name, dent_b->name);
}

/* List one page (@offset/@limit) of @dir_id's entries, sorted dirs-first,
 * each tagged with the caller's permission.  For a repo owner listing a
 * non-virtual repo, sub-directories also get an "is_shared" flag.  Sets
 * *error and returns NULL on permission failure or bad ids. */
GList *
seaf_repo_manager_list_dir_with_perm (SeafRepoManager *mgr,
                                      const char *repo_id,
                                      const char *dir_path,
                                      const char *dir_id,
                                      const char *user,
                                      int offset,
                                      int limit,
                                      GError **error)
{
    SeafRepo *repo;
    char *perm = NULL;
    SeafDir *dir;
    SeafDirent *dent;
    SeafileDirent *d;
    GList *res = NULL;
    GList *p;

    perm = seaf_repo_manager_check_permission (mgr, repo_id, user, error);
    if (!perm) {
        if (*error == NULL)
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                         "Access denied");
        return NULL;
    }

    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad repo id");
        g_free (perm);
        return NULL;
    }

    dir = seaf_fs_manager_get_seafdir (seaf->fs_mgr,
                                       repo->store_id, repo->version,
                                       dir_id);
    if (!dir) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_DIR_ID, "Bad dir id");
        seaf_repo_unref (repo);
        g_free (perm);
        return NULL;
    }

    dir->entries = g_list_sort (dir->entries, comp_dirent_func);

    if (offset < 0) {
        offset = 0;
    }

    int index = 0;
    gboolean is_shared;
    char *cur_path;
    GHashTable *shared_sub_dirs = NULL;
    /* Only the owner of a non-virtual repo sees share markers on subdirs. */
    if (!repo->virtual_info) {
        char *repo_owner = seaf_repo_manager_get_repo_owner (seaf->repo_mgr,
                                                             repo_id);
        if (repo_owner && strcmp (user, repo_owner) == 0) {
            shared_sub_dirs = seaf_share_manager_get_shared_sub_dirs (seaf->share_mgr,
                                                                      repo->store_id,
                                                                      dir_path);
        }
        g_free (repo_owner);
    }

    for (p = dir->entries; p != NULL; p = p->next, index++) {
        /* Manual offset/limit paging over the sorted entry list. */
        if (index < offset) {
            continue;
        }

        if (limit > 0) {
            if (index >= offset + limit)
                break;
        }

        dent = p->data;

        if (!is_object_id_valid (dent->id))
            continue;

        d = g_object_new (SEAFILE_TYPE_DIRENT,
                          "obj_id", dent->id,
                          "obj_name", dent->name,
                          "mode", dent->mode,
                          "version", dent->version,
                          "mtime", dent->mtime,
                          "size", dent->size,
                          "permission", perm,
                          "modifier", dent->modifier,
                          NULL);

        if (shared_sub_dirs && S_ISDIR(dent->mode)) {
            if (strcmp (dir_path, "/") == 0) {
                cur_path = g_strconcat (dir_path, dent->name, NULL);
            } else {
                cur_path = g_strconcat (dir_path, "/", dent->name, NULL);
            }
            is_shared = g_hash_table_lookup (shared_sub_dirs, cur_path) ?
TRUE : FALSE; g_free (cur_path); g_object_set (d, "is_shared", is_shared, NULL); } res = g_list_prepend (res, d); } if (shared_sub_dirs) g_hash_table_destroy (shared_sub_dirs); seaf_dir_free (dir); seaf_repo_unref (repo); g_free (perm); if (res) res = g_list_reverse (res); return res; } ================================================ FILE: server/seaf-server.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include "common.h" #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) #include #else #include #endif #include #include #include #include #include #include #include #include #include #include #include "seafile-session.h" #include "seafile-rpc.h" #include "log.h" #include "utils.h" #include "cdc/cdc.h" SeafileSession *seaf; char *pidfile = NULL; static const char *short_options = "hvc:d:l:fP:D:F:p:tr:"; static struct option long_options[] = { { "help", no_argument, NULL, 'h', }, { "version", no_argument, NULL, 'v', }, { "config-file", required_argument, NULL, 'c' }, { "central-config-dir", required_argument, NULL, 'F' }, { "seafdir", required_argument, NULL, 'd' }, { "log", required_argument, NULL, 'l' }, { "debug", required_argument, NULL, 'D' }, { "foreground", no_argument, NULL, 'f' }, { "pidfile", required_argument, NULL, 'P' }, { "rpc-pipe-path", required_argument, NULL, 'p' }, { "test-config", no_argument, NULL, 't' }, { "repair-repo", required_argument, NULL, 'r' }, { NULL, 0, NULL, 0, }, }; static void usage () { fprintf (stderr, "usage: seaf-server [-c config_dir] [-d seafile_dir]\n"); } #include #include "searpc-signature.h" #include "searpc-marshal.h" #include #define SEAFILE_RPC_PIPE_NAME "seafile.sock" #define NAMED_PIPE_SERVER_THREAD_POOL_SIZE 50 static void start_rpc_service (const char *seafile_dir, const char *rpc_pipe_path) { SearpcNamedPipeServer *rpc_server = NULL; char *pipe_path = NULL; searpc_server_init (register_marshals); 
searpc_create_service ("seafserv-threaded-rpcserver"); /* threaded services */ /* repo manipulation */ searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_repos_by_id_prefix, "seafile_get_repos_by_id_prefix", searpc_signature_objlist__string_int_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_repo, "seafile_get_repo", searpc_signature_object__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_destroy_repo, "seafile_destroy_repo", searpc_signature_int__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_repo_list, "seafile_get_repo_list", searpc_signature_objlist__int_int_string_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_count_repos, "seafile_count_repos", searpc_signature_int64__void()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_set_repo_owner, "seafile_set_repo_owner", searpc_signature_int__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_repo_owner, "seafile_get_repo_owner", searpc_signature_string__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_orphan_repo_list, "seafile_get_orphan_repo_list", searpc_signature_objlist__void()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_edit_repo, "seafile_edit_repo", searpc_signature_int__string_string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_change_repo_passwd, "seafile_change_repo_passwd", searpc_signature_int__string_string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_upgrade_repo_pwd_hash_algorithm, "seafile_upgrade_repo_pwd_hash_algorithm", searpc_signature_int__string_string_string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_is_repo_owner, "seafile_is_repo_owner", 
searpc_signature_int__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_list_owned_repos, "seafile_list_owned_repos", searpc_signature_objlist__string_int_int_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_search_repos_by_name, "seafile_search_repos_by_name", searpc_signature_objlist__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_server_repo_size, "seafile_server_repo_size", searpc_signature_int64__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_repo_set_access_property, "seafile_repo_set_access_property", searpc_signature_int__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_repo_query_access_property, "seafile_repo_query_access_property", searpc_signature_string__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_revert_on_server, "seafile_revert_on_server", searpc_signature_int__string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_diff, "seafile_diff", searpc_signature_objlist__string_string_string_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_post_file, "seafile_post_file", searpc_signature_int__string_string_string_string_string()); /* searpc_server_register_function ("seafserv-threaded-rpcserver", */ /* seafile_post_file_blocks, */ /* "seafile_post_file_blocks", */ /* searpc_signature_string__string_string_string_string_string_string_int64_int()); */ searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_post_multi_files, "seafile_post_multi_files", searpc_signature_string__string_string_string_string_string_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_put_file, "seafile_put_file", searpc_signature_string__string_string_string_string_string_string()); /* searpc_server_register_function ("seafserv-threaded-rpcserver", 
*/ /* seafile_put_file_blocks, */ /* "seafile_put_file_blocks", */ /* searpc_signature_string__string_string_string_string_string_string_string_int64()); */ searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_post_empty_file, "seafile_post_empty_file", searpc_signature_int__string_string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_post_dir, "seafile_post_dir", searpc_signature_int__string_string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_mkdir_with_parents, "seafile_mkdir_with_parents", searpc_signature_int__string_string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_del_file, "seafile_del_file", searpc_signature_int__string_string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_batch_del_files, "seafile_batch_del_files", searpc_signature_int__string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_copy_file, "seafile_copy_file", searpc_signature_object__string_string_string_string_string_string_string_int_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_move_file, "seafile_move_file", searpc_signature_object__string_string_string_string_string_string_int_string_int_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_rename_file, "seafile_rename_file", searpc_signature_int__string_string_string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_is_valid_filename, "seafile_is_valid_filename", searpc_signature_int__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_create_repo, "seafile_create_repo", searpc_signature_string__string_string_string_string_int_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_create_enc_repo, "seafile_create_enc_repo", 
searpc_signature_string__string_string_string_string_string_string_string_int_string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_commit, "seafile_get_commit", searpc_signature_object__string_int_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_list_dir, "seafile_list_dir", searpc_signature_objlist__string_string_int_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_list_dir_with_perm, "list_dir_with_perm", searpc_signature_objlist__string_string_string_string_int_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_list_file_blocks, "seafile_list_file_blocks", searpc_signature_string__string_string_int_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_file_size, "seafile_get_file_size", searpc_signature_int64__string_int_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_dir_size, "seafile_get_dir_size", searpc_signature_int64__string_int_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_list_dir_by_path, "seafile_list_dir_by_path", searpc_signature_objlist__string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_dir_id_by_commit_and_path, "seafile_get_dir_id_by_commit_and_path", searpc_signature_string__string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_file_id_by_path, "seafile_get_file_id_by_path", searpc_signature_string__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_dir_id_by_path, "seafile_get_dir_id_by_path", searpc_signature_string__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_dirent_by_path, "seafile_get_dirent_by_path", searpc_signature_object__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", 
seafile_list_file_revisions, "seafile_list_file_revisions", searpc_signature_objlist__string_string_string_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_calc_files_last_modified, "seafile_calc_files_last_modified", searpc_signature_objlist__string_string_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_revert_file, "seafile_revert_file", searpc_signature_int__string_string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_revert_dir, "seafile_revert_dir", searpc_signature_int__string_string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_check_repo_blocks_missing, "seafile_check_repo_blocks_missing", searpc_signature_string__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_deleted, "get_deleted", searpc_signature_objlist__string_int_string_string_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_total_file_number, "get_total_file_number", searpc_signature_int64__void()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_total_storage, "get_total_storage", searpc_signature_int64__void()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_file_count_info_by_path, "get_file_count_info_by_path", searpc_signature_object__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_trash_repo_owner, "get_trash_repo_owner", searpc_signature_string__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_convert_repo_path, "convert_repo_path", searpc_signature_string__string_string_string_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_search_files, "search_files", searpc_signature_objlist__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_search_files_by_path, 
"search_files_by_path", searpc_signature_objlist__string_string_string()); /* share repo to user */ searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_add_share, "seafile_add_share", searpc_signature_int__string_string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_list_share_repos, "seafile_list_share_repos", searpc_signature_objlist__string_string_int_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_list_repo_shared_to, "seafile_list_repo_shared_to", searpc_signature_objlist__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_remove_share, "seafile_remove_share", searpc_signature_int__string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_set_share_permission, "set_share_permission", searpc_signature_int__string_string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_share_subdir_to_user, "share_subdir_to_user", searpc_signature_string__string_string_string_string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_unshare_subdir_for_user, "unshare_subdir_for_user", searpc_signature_int__string_string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_update_share_subdir_perm_for_user, "update_share_subdir_perm_for_user", searpc_signature_int__string_string_string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_shared_repo_by_path, "get_shared_repo_by_path", searpc_signature_object__string_string_string_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_shared_users_by_repo, "get_shared_users_by_repo", searpc_signature_objlist__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_org_get_shared_users_by_repo, "org_get_shared_users_by_repo", 
searpc_signature_objlist__int_string()); /* share repo to group */ searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_group_share_repo, "seafile_group_share_repo", searpc_signature_int__string_int_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_group_unshare_repo, "seafile_group_unshare_repo", searpc_signature_int__string_int_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_shared_groups_by_repo, "seafile_get_shared_groups_by_repo", searpc_signature_string__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_share_subdir_to_group, "share_subdir_to_group", searpc_signature_string__string_string_string_int_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_unshare_subdir_for_group, "unshare_subdir_for_group", searpc_signature_int__string_string_string_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_update_share_subdir_perm_for_group, "update_share_subdir_perm_for_group", searpc_signature_int__string_string_string_int_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_group_repoids, "seafile_get_group_repoids", searpc_signature_string__int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_list_repo_shared_group, "seafile_list_repo_shared_group", searpc_signature_objlist__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_group_shared_repo_by_path, "get_group_shared_repo_by_path", searpc_signature_object__string_string_int_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_group_repos_by_user, "get_group_repos_by_user", searpc_signature_objlist__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_org_group_repos_by_user, "get_org_group_repos_by_user", searpc_signature_objlist__string_int()); 
searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_repos_by_group, "seafile_get_repos_by_group", searpc_signature_objlist__int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_group_repos_by_owner, "get_group_repos_by_owner", searpc_signature_objlist__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_group_repo_owner, "get_group_repo_owner", searpc_signature_string__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_remove_repo_group, "seafile_remove_repo_group", searpc_signature_int__int_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_set_group_repo_permission, "set_group_repo_permission", searpc_signature_int__int_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_shared_users_for_subdir, "seafile_get_shared_users_for_subdir", searpc_signature_objlist__string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_shared_groups_for_subdir, "seafile_get_shared_groups_for_subdir", searpc_signature_objlist__string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_repo_has_been_shared, "repo_has_been_shared", searpc_signature_int__string_int()); /* branch and commit */ searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_branch_gets, "seafile_branch_gets", searpc_signature_objlist__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_commit_list, "seafile_get_commit_list", searpc_signature_objlist__string_int_int()); /* token */ searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_generate_repo_token, "seafile_generate_repo_token", searpc_signature_string__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_delete_repo_token, "seafile_delete_repo_token", 
searpc_signature_int__string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_list_repo_tokens, "seafile_list_repo_tokens", searpc_signature_objlist__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_list_repo_tokens_by_email, "seafile_list_repo_tokens_by_email", searpc_signature_objlist__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_delete_repo_tokens_by_peer_id, "seafile_delete_repo_tokens_by_peer_id", searpc_signature_int__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_delete_repo_tokens_by_email, "delete_repo_tokens_by_email", searpc_signature_int__string()); /* quota */ searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_user_quota_usage, "seafile_get_user_quota_usage", searpc_signature_int64__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_user_share_usage, "seafile_get_user_share_usage", searpc_signature_int64__string()); /* virtual repo */ searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_create_virtual_repo, "create_virtual_repo", searpc_signature_string__string_string_string_string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_virtual_repos_by_owner, "get_virtual_repos_by_owner", searpc_signature_objlist__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_virtual_repo, "get_virtual_repo", searpc_signature_object__string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_upload_tmp_file_offset, "seafile_get_upload_tmp_file_offset", searpc_signature_int64__string_string()); /* Clean trash */ searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_clean_up_repo_history, "clean_up_repo_history", searpc_signature_int__string_int()); /* -------- rpc services -------- */ /* token for web 
access to repo */ searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_web_get_access_token, "seafile_web_get_access_token", searpc_signature_string__string_string_string_string_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_web_query_access_token, "seafile_web_query_access_token", searpc_signature_object__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_query_zip_progress, "seafile_query_zip_progress", searpc_signature_string__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_cancel_zip_task, "cancel_zip_task", searpc_signature_int__string()); /* Copy task related. */ searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_copy_task, "get_copy_task", searpc_signature_object__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_cancel_copy_task, "cancel_copy_task", searpc_signature_int__string()); /* password management */ searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_check_passwd, "seafile_check_passwd", searpc_signature_int__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_set_passwd, "seafile_set_passwd", searpc_signature_int__string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_unset_passwd, "seafile_unset_passwd", searpc_signature_int__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_is_passwd_set, "seafile_is_passwd_set", searpc_signature_int__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_decrypt_key, "seafile_get_decrypt_key", searpc_signature_object__string_string()); /* quota management */ searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_set_user_quota, "set_user_quota", searpc_signature_int__string_int64()); searpc_server_register_function ("seafserv-threaded-rpcserver", 
seafile_get_user_quota, "get_user_quota", searpc_signature_int64__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_check_quota, "check_quota", searpc_signature_int__string_int64()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_list_user_quota_usage, "list_user_quota_usage", searpc_signature_objlist__void()); /* repo permission */ searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_check_permission, "check_permission", searpc_signature_string__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_set_repo_status, "set_repo_status", searpc_signature_int__string_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_repo_status, "get_repo_status", searpc_signature_int__string()); /* folder permission */ searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_check_permission_by_path, "check_permission_by_path", searpc_signature_string__string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_file_id_by_commit_and_path, "seafile_get_file_id_by_commit_and_path", searpc_signature_string__string_string_string()); /* event */ searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_publish_event, "publish_event", searpc_signature_int__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_pop_event, "pop_event", searpc_signature_json__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_set_inner_pub_repo, "set_inner_pub_repo", searpc_signature_int__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_unset_inner_pub_repo, "unset_inner_pub_repo", searpc_signature_int__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_is_inner_pub_repo, "is_inner_pub_repo", searpc_signature_int__string()); searpc_server_register_function 
("seafserv-threaded-rpcserver", seafile_list_inner_pub_repos, "list_inner_pub_repos", searpc_signature_objlist__void()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_count_inner_pub_repos, "count_inner_pub_repos", searpc_signature_int64__void()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_list_inner_pub_repos_by_owner, "list_inner_pub_repos_by_owner", searpc_signature_objlist__string()); /* History */ searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_set_repo_history_limit, "set_repo_history_limit", searpc_signature_int__string_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_repo_history_limit, "get_repo_history_limit", searpc_signature_int__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_set_repo_valid_since, "set_repo_valid_since", searpc_signature_int__string_int64()); /* System default library */ searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_system_default_repo_id, "get_system_default_repo_id", searpc_signature_string__void()); /* Trashed repos. 
*/ searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_trash_repo_list, "get_trash_repo_list", searpc_signature_objlist__int_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_del_repo_from_trash, "del_repo_from_trash", searpc_signature_int__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_restore_repo_from_trash, "restore_repo_from_trash", searpc_signature_int__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_trash_repos_by_owner, "get_trash_repos_by_owner", searpc_signature_objlist__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_empty_repo_trash, "empty_repo_trash", searpc_signature_int__void()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_empty_repo_trash_by_owner, "empty_repo_trash_by_owner", searpc_signature_int__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_generate_magic_and_random_key, "generate_magic_and_random_key", searpc_signature_object__int_string_string()); /* Config */ searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_server_config_int, "get_server_config_int", searpc_signature_int__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_set_server_config_int, "set_server_config_int", searpc_signature_int__string_string_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_server_config_int64, "get_server_config_int64", searpc_signature_int64__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_set_server_config_int64, "set_server_config_int64", searpc_signature_int__string_string_int64()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_server_config_string, "get_server_config_string", searpc_signature_string__string_string()); searpc_server_register_function 
("seafserv-threaded-rpcserver", seafile_set_server_config_string, "set_server_config_string", searpc_signature_int__string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_get_server_config_boolean, "get_server_config_boolean", searpc_signature_int__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", seafile_set_server_config_boolean, "set_server_config_boolean", searpc_signature_int__string_string_int()); /*user management*/ searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_add_emailuser, "add_emailuser", searpc_signature_int__string_string_int_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_remove_emailuser, "remove_emailuser", searpc_signature_int__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_validate_emailuser, "validate_emailuser", searpc_signature_int__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_emailuser, "get_emailuser", searpc_signature_object__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_emailuser_with_import, "get_emailuser_with_import", searpc_signature_object__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_emailuser_by_id, "get_emailuser_by_id", searpc_signature_object__int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_emailusers, "get_emailusers", searpc_signature_objlist__string_int_int_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_search_emailusers, "search_emailusers", searpc_signature_objlist__string_string_int_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_count_emailusers, "count_emailusers", searpc_signature_int64__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_count_inactive_emailusers, 
"count_inactive_emailusers", searpc_signature_int64__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_update_emailuser, "update_emailuser", searpc_signature_int__string_int_string_int_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_update_role_emailuser, "update_role_emailuser", searpc_signature_int__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_superusers, "get_superusers", searpc_signature_objlist__void()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_emailusers_in_list, "get_emailusers_in_list", searpc_signature_objlist__string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_update_emailuser_id, "update_emailuser_id", searpc_signature_int__string_string()); /*group management*/ searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_create_group, "create_group", searpc_signature_int__string_string_string_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_create_org_group, "create_org_group", searpc_signature_int__int_string_string_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_remove_group, "remove_group", searpc_signature_int__int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_group_add_member, "group_add_member", searpc_signature_int__int_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_group_remove_member, "group_remove_member", searpc_signature_int__int_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_group_set_admin, "group_set_admin", searpc_signature_int__int_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_group_unset_admin, "group_unset_admin", searpc_signature_int__int_string()); searpc_server_register_function 
("seafserv-threaded-rpcserver", ccnet_rpc_set_group_name, "set_group_name", searpc_signature_int__int_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_quit_group, "quit_group", searpc_signature_int__int_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_groups, "get_groups", searpc_signature_objlist__string_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_list_all_departments, "list_all_departments", searpc_signature_objlist__void()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_all_groups, "get_all_groups", searpc_signature_objlist__int_int_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_ancestor_groups, "get_ancestor_groups", searpc_signature_objlist__int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_group, "get_group", searpc_signature_object__int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_group_members, "get_group_members", searpc_signature_objlist__int_int_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_members_with_prefix, "get_members_with_prefix", searpc_signature_objlist__int_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_check_group_staff, "check_group_staff", searpc_signature_int__int_string_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_remove_group_user, "remove_group_user", searpc_signature_int__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_is_group_user, "is_group_user", searpc_signature_int__int_string_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_set_group_creator, "set_group_creator", searpc_signature_int__int_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_search_groups, 
"search_groups", searpc_signature_objlist__string_int_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_search_group_members, "search_group_members", searpc_signature_objlist__int_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_top_groups, "get_top_groups", searpc_signature_objlist__int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_child_groups, "get_child_groups", searpc_signature_objlist__int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_descendants_groups, "get_descendants_groups", searpc_signature_objlist__int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_groups_members, "get_groups_members", searpc_signature_objlist__string()); /*org management*/ searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_create_org, "create_org", searpc_signature_int__string_string_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_remove_org, "remove_org", searpc_signature_int__int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_all_orgs, "get_all_orgs", searpc_signature_objlist__int_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_count_orgs, "count_orgs", searpc_signature_int64__void()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_org_by_url_prefix, "get_org_by_url_prefix", searpc_signature_object__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_org_by_id, "get_org_by_id", searpc_signature_object__int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_add_org_user, "add_org_user", searpc_signature_int__int_string_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_remove_org_user, "remove_org_user", searpc_signature_int__int_string()); 
searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_orgs_by_user, "get_orgs_by_user", searpc_signature_objlist__string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_org_emailusers, "get_org_emailusers", searpc_signature_objlist__string_int_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_add_org_group, "add_org_group", searpc_signature_int__int_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_remove_org_group, "remove_org_group", searpc_signature_int__int_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_is_org_group, "is_org_group", searpc_signature_int__int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_org_id_by_group, "get_org_id_by_group", searpc_signature_int__int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_org_groups, "get_org_groups", searpc_signature_objlist__int_int_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_org_groups_by_user, "get_org_groups_by_user", searpc_signature_objlist__string_int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_get_org_top_groups, "get_org_top_groups", searpc_signature_objlist__int()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_org_user_exists, "org_user_exists", searpc_signature_int__int_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_is_org_staff, "is_org_staff", searpc_signature_int__int_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_set_org_staff, "set_org_staff", searpc_signature_int__int_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", ccnet_rpc_unset_org_staff, "unset_org_staff", searpc_signature_int__int_string()); searpc_server_register_function ("seafserv-threaded-rpcserver", 
ccnet_rpc_set_org_name, "set_org_name", searpc_signature_int__int_string());

    /* Build the named-pipe path: prefer the explicit rpc_pipe_path,
     * otherwise place the pipe inside the seafile data directory. */
    if (rpc_pipe_path) {
        pipe_path = g_build_path ("/", rpc_pipe_path, SEAFILE_RPC_PIPE_NAME, NULL);
    } else {
        pipe_path = g_build_path ("/", seafile_dir, SEAFILE_RPC_PIPE_NAME, NULL);
    }
    rpc_server = searpc_create_named_pipe_server_with_threadpool (pipe_path, NAMED_PIPE_SERVER_THREAD_POOL_SIZE);
    g_free(pipe_path);
    if (!rpc_server) {
        seaf_warning ("Failed to create rpc server.\n");
        exit (1);
    }
    rpc_server->use_epoll = TRUE;
    if (searpc_named_pipe_server_start(rpc_server) < 0) {
        seaf_warning ("Failed to start rpc server.\n");
        exit (1);
    }
}

/* libevent handle for the SIGUSR1 signal event; static so it stays
 * alive for as long as the event is registered. */
static struct event sigusr1;

/* SIGUSR1 handler: reopen the log file (supports log rotation). */
static void sigusr1Handler (int fd, short event, void *user_data)
{
    seafile_log_reopen ();
}

/* Install process-wide signal handling: ignore SIGPIPE and reopen the
 * log on SIGUSR1.  No-op on Windows.  `session` is currently unused. */
static void
set_signal_handlers (SeafileSession *session)
{
#ifndef WIN32
    signal (SIGPIPE, SIG_IGN);
    /* SIGUSR1 asks the server to reopen its log file. */
    event_set(&sigusr1, SIGUSR1, EV_SIGNAL | EV_PERSIST, sigusr1Handler, NULL);
    event_add(&sigusr1, NULL);
#endif
}

/* Delete the pid file, if a path was given. */
static void
remove_pidfile (const char *pidfile)
{
    if (pidfile) {
        g_unlink (pidfile);
    }
}

/* Write the current process id (plus a newline) to pidfile_path.
 * Returns 0 on success, -1 on a NULL path or any I/O failure. */
static int
write_pidfile (const char *pidfile_path)
{
    if (!pidfile_path)
        return -1;

    pid_t pid = getpid();

    FILE *pidfile = g_fopen(pidfile_path, "w");
    if (!pidfile) {
        seaf_warning ("Failed to fopen() pidfile %s: %s\n",
                      pidfile_path, strerror(errno));
        return -1;
    }

    char buf[32];
    /* NOTE(review): assumes pid_t fits in int — true on common platforms,
     * but worth confirming for exotic targets. */
    snprintf (buf, sizeof(buf), "%d\n", pid);
    if (fputs(buf, pidfile) < 0) {
        seaf_warning ("Failed to write pidfile %s: %s\n",
                      pidfile_path, strerror(errno));
        fclose (pidfile);
        return -1;
    }

    fflush (pidfile);
    fclose (pidfile);
    return 0;
}

/* atexit() hook: remove the pid file on normal process exit. */
static void
on_seaf_server_exit(void)
{
    if (pidfile)
        remove_pidfile (pidfile);
}

#ifdef WIN32
/* Get the commandline arguments in unicode, then convert them to utf8 */
static char **
get_argv_utf8 (int *argc)
{
    int i = 0;
    char **argv = NULL;
    const wchar_t *cmdline = NULL;
    wchar_t **argv_w = NULL;

    cmdline = GetCommandLineW();
    argv_w = CommandLineToArgvW (cmdline, argc);
    if (!argv_w) {
        printf("failed to 
CommandLineToArgvW(), GLE=%lu\n", GetLastError());
        return NULL;
    }

    argv = (char **)malloc (sizeof(char*) * (*argc));
    for (i = 0; i < *argc; i++) {
        argv[i] = wchar_to_utf8 (argv_w[i]);
    }

    return argv;
}
#endif

/* Load and initialize a seafile session from the given config
 * directories, logging to stdout ("-").  Used by the -t command line
 * option to validate the configuration without starting the server.
 * Returns 0 when the configuration is usable, -1 otherwise. */
int
test_seafile_config(const char *central_config_dir,
                    const char *config_dir,
                    const char *seafile_dir)
{
#if !GLIB_CHECK_VERSION(2, 36, 0)
    g_type_init ();
#endif

    config_dir = ccnet_expand_path (config_dir);
    if (central_config_dir) {
        central_config_dir = ccnet_expand_path (central_config_dir);
    }

    /* "-" directs log output to stdout. */
    seafile_log_init ("-", "debug", "debug", "seaf-server");

    srand (time(NULL));

    event_init ();

    seaf = seafile_session_new (central_config_dir, seafile_dir, config_dir);
    if (!seaf) {
        seaf_error ("Error: failed to create ccnet session\n");
        return -1;
    }

    if (seafile_session_init (seaf) < 0)
        return -1;

    return 0;
}

/* seaf-server entry point: parse options, optionally daemonize,
 * initialize logging and the seafile session, then run the event loop. */
int
main (int argc, char **argv)
{
    int c;
    char *ccnet_dir = DEFAULT_CONFIG_DIR;
    char *seafile_dir = NULL;
    char *central_config_dir = NULL;
    char *logfile = NULL;
    char *rpc_pipe_path = NULL;
    const char *debug_str = NULL;
    int daemon_mode = 1;
    gboolean test_config = FALSE;
    char *repo_id = NULL;

#ifdef WIN32
    /* Re-fetch argv as UTF-8 on Windows. */
    argv = get_argv_utf8 (&argc);
#endif

    /* Parse command line options (see short_options/long_options). */
    while ((c = getopt_long (argc, argv, short_options, long_options, NULL)) != EOF) {
        switch (c) {
        case 'h':
            exit (1);
            break;
        case 'v':
            exit (1);
            break;
        case 'c':
            ccnet_dir = optarg;
            break;
        case 'd':
            seafile_dir = g_strdup(optarg);
            break;
        case 'F':
            central_config_dir = g_strdup(optarg);
            break;
        case 'f':
            /* Foreground mode. */
            daemon_mode = 0;
            break;
        case 'l':
            logfile = g_strdup(optarg);
            break;
        case 'D':
            debug_str = optarg;
            break;
        case 'P':
            pidfile = optarg;
            break;
        case 'p':
            rpc_pipe_path = g_strdup (optarg);
            break;
        case 't':
            test_config = TRUE;
            break;
        case 'r':
            repo_id = g_strdup (optarg);
            break;
        default:
            usage ();
            exit (1);
        }
    }

    argc -= optind;
    argv += optind;

    if (test_config) {
        /* -t: only validate the configuration, then exit. */
        return test_seafile_config (central_config_dir, ccnet_dir, seafile_dir);
    }

    const char *log_to_stdout_env = g_getenv("SEAFILE_LOG_TO_STDOUT");
    if (g_strcmp0 (log_to_stdout_env, 
"true") == 0) {
        /* SEAFILE_LOG_TO_STDOUT=true forces foreground mode. */
        daemon_mode = 0;
    }

#ifndef WIN32
    if (daemon_mode) {
#ifndef __APPLE__
        daemon (1, 0);
#else /* __APPLE */
        /* daemon() is deprecated on macOS; use fork() instead. */
        switch (fork ()) {
        case -1:
            seaf_warning ("Failed to daemonize");
            exit (-1);
            break;
        case 0:
            /* all good*/
            break;
        default:
            /* kill origin process */
            exit (0);
        }
#endif /* __APPLE */
    }
#endif /* !WIN32 */

    cdc_init ();

#if !GLIB_CHECK_VERSION(2, 35, 0)
    g_type_init();
#endif
#if !GLIB_CHECK_VERSION(2,32,0)
    g_thread_init (NULL);
#endif

    /* -D takes precedence; otherwise read debug flags from the env. */
    if (!debug_str)
        debug_str = g_getenv("SEAFILE_DEBUG");
    seafile_debug_set_flags_string (debug_str);

    /* Default paths are derived from the ccnet directory when not given. */
    if (seafile_dir == NULL)
        seafile_dir = g_build_filename (ccnet_dir, "seafile", NULL);
    if (logfile == NULL)
        logfile = g_build_filename (seafile_dir, "seafile.log", NULL);

    if (seafile_log_init (logfile, "info", "debug", "seaf-server") < 0) {
        seaf_warning ("Failed to init log.\n");
        exit (1);
    }

    event_init ();

    if (repo_id) {
        /* -r: repair the given virtual repo, then exit. */
        seaf = seafile_repair_session_new (central_config_dir, seafile_dir, ccnet_dir);
        if (!seaf) {
            seaf_warning ("Failed to create repair seafile session.\n");
            exit (1);
        }
        seaf_repo_manager_repair_virtual_repo (repo_id);
        exit (0);
    }

    seaf = seafile_session_new (central_config_dir, seafile_dir, ccnet_dir);
    if (!seaf) {
        seaf_warning ("Failed to create seafile session.\n");
        exit (1);
    }

#ifndef WIN32
    set_syslog_config (seaf->config);
#endif

    set_signal_handlers (seaf);

    /* Create pid file before connecting to database.
     * Connecting to database and creating tables may take long if the db
     * is on a remote host. This may make controller think seaf-server fails
     * to start and restart it.
*/ if (pidfile) { if (write_pidfile (pidfile) < 0) { ccnet_message ("Failed to write pidfile\n"); return -1; } } /* init seaf */ if (seafile_session_init (seaf) < 0) exit (1); if (seafile_session_start (seaf) < 0) exit (1); start_rpc_service (seafile_dir, rpc_pipe_path); g_free (seafile_dir); g_free (logfile); g_free (rpc_pipe_path); atexit (on_seaf_server_exit); /* Create a system default repo to contain the tutorial file. */ schedule_create_system_default_repo (seaf); event_dispatch (); return 0; } ================================================ FILE: server/seafile-session.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include "common.h" #include #include #include #include #include #include #include #include #include #include #include #include "utils.h" #include "seafile-session.h" #include "mq-mgr.h" #include "seaf-db.h" #include "seaf-utils.h" #include "log.h" #define CONNECT_INTERVAL_MSEC 10 * 1000 #define DEFAULT_THREAD_POOL_SIZE 500 #define DEFAULT_FIXED_BLOCK_SIZE ((gint64)1 << 23) /* 8MB */ static void load_fileserver_config (SeafileSession *session) { int web_token_expire_time; int max_index_processing_threads; int fixed_block_size_mb; int max_indexing_threads; gint64 max_upload_size; web_token_expire_time = g_key_file_get_integer (session->config, "fileserver", "web_token_expire_time", NULL); if (web_token_expire_time <= 0) { session->web_token_expire_time = 3600; } else { session->web_token_expire_time = web_token_expire_time; } seaf_message ("fileserver: web_token_expire_time = %d\n", session->web_token_expire_time); max_index_processing_threads = g_key_file_get_integer (session->config, "fileserver", "max_index_processing_threads", NULL); if (max_index_processing_threads <= 0) { session->max_index_processing_threads = 3; } else { session->max_index_processing_threads = max_index_processing_threads; } seaf_message ("fileserver: max_index_processing_threads= %d\n", 
session->max_index_processing_threads); fixed_block_size_mb = g_key_file_get_integer (session->config, "fileserver", "fixed_block_size", NULL); if (fixed_block_size_mb <= 0){ session->fixed_block_size = DEFAULT_FIXED_BLOCK_SIZE; } else { session->fixed_block_size = fixed_block_size_mb * ((gint64)1 << 20); } seaf_message ("fileserver: fixed_block_size = %"G_GINT64_FORMAT"\n", session->fixed_block_size); max_indexing_threads = g_key_file_get_integer (session->config, "fileserver", "max_indexing_threads", NULL); if (max_indexing_threads <= 0) { session->max_indexing_threads = 1; } else { session->max_indexing_threads = max_indexing_threads; } seaf_message ("fileserver: max_indexing_threads = %d\n", session->max_indexing_threads); GError *err = NULL; max_upload_size = g_key_file_get_int64(session->config, "fileserver", "max_upload_size", &err); if (err) { max_upload_size = -1; g_clear_error(&err); } else if (max_upload_size > 0) { max_upload_size = max_upload_size * 1000000; } session->max_upload_size = max_upload_size; seaf_message ("fileserver: max_upload_size = %d\n", session->max_upload_size); return; } static int load_config (SeafileSession *session, const char *config_file_path) { int ret = 0; GError *error = NULL; GKeyFile *config = NULL; const char *notif_server = NULL; const char *enable_notif_server = NULL; const char *private_key = NULL; const char *site_root = NULL; const char *log_to_stdout = NULL; const char *node_name = NULL; const char *use_go_fileserver = NULL; config = g_key_file_new (); if (!g_key_file_load_from_file (config, config_file_path, G_KEY_FILE_NONE, &error)) { seaf_warning ("Failed to load config file.\n"); ret = -1; goto out; } session->config = config; session->cloud_mode = g_key_file_get_boolean (config, "general", "cloud_mode", NULL); session->go_fileserver = g_key_file_get_boolean (config, "fileserver", "use_go_fileserver", NULL); session->obj_cache = objcache_new (); // Read config from env private_key = g_getenv("JWT_PRIVATE_KEY"); 
site_root = g_getenv("SITE_ROOT"); log_to_stdout = g_getenv("SEAFILE_LOG_TO_STDOUT"); notif_server = g_getenv("INNER_NOTIFICATION_SERVER_URL"); enable_notif_server = g_getenv("ENABLE_NOTIFICATION_SERVER"); node_name = g_getenv("NODE_NAME"); use_go_fileserver = g_getenv("ENABLE_GO_FILESERVER"); if (!private_key) { seaf_warning ("Failed to read JWT_PRIVATE_KEY.\n"); ret = -1; goto out; } if ((notif_server && g_strcmp0 (notif_server, "") != 0) && (enable_notif_server && g_strcmp0 (enable_notif_server, "true") == 0)) { session->notif_server_private_key = g_strdup (private_key); session->notif_url = g_strdup (notif_server); } session->seahub_pk = g_strdup (private_key); if (!site_root || g_strcmp0 (site_root, "") == 0) { site_root = "/"; } session->seahub_url = g_strdup_printf("http://127.0.0.1:8000%sapi/v2.1/internal", site_root); session->seahub_conn_pool = connection_pool_new (); if (g_strcmp0 (log_to_stdout, "true") == 0) { session->log_to_stdout = TRUE; } if (!node_name || g_strcmp0 (node_name, "") == 0) { node_name = "default"; } session->node_name = g_strdup (node_name); if (use_go_fileserver && g_strcmp0 (use_go_fileserver, "true") == 0) { session->go_fileserver = TRUE; } out: if (ret < 0) { if (config) g_key_file_free (config); } return ret; } SeafileSession * seafile_session_new(const char *central_config_dir, const char *seafile_dir, const char *ccnet_dir) { char *abs_central_config_dir = NULL; char *abs_seafile_dir; char *abs_ccnet_dir = NULL; char *tmp_file_dir; char *config_file_path = NULL; SeafileSession *session = NULL; abs_ccnet_dir = ccnet_expand_path (ccnet_dir); abs_seafile_dir = ccnet_expand_path (seafile_dir); tmp_file_dir = g_build_filename (abs_seafile_dir, "tmpfiles", NULL); if (central_config_dir) { abs_central_config_dir = ccnet_expand_path (central_config_dir); } if (checkdir_with_mkdir (abs_seafile_dir) < 0) { seaf_warning ("Config dir %s does not exist and is unable to create\n", abs_seafile_dir); goto onerror; } if (checkdir_with_mkdir 
(tmp_file_dir) < 0) { seaf_warning ("Temp file dir %s does not exist and is unable to create\n", tmp_file_dir); goto onerror; } if (checkdir_with_mkdir (abs_ccnet_dir) < 0) { seaf_warning ("Ccnet config dir %s does not exist and is unable to create\n", abs_ccnet_dir); goto onerror; } config_file_path = g_build_filename( abs_central_config_dir ? abs_central_config_dir : abs_seafile_dir, "seafile.conf", NULL); session = g_new0(SeafileSession, 1); session->seaf_dir = abs_seafile_dir; session->ccnet_dir = abs_ccnet_dir; session->tmp_file_dir = tmp_file_dir; if (load_config (session, config_file_path) < 0) { goto onerror; } load_fileserver_config (session); if (load_database_config (session) < 0) { seaf_warning ("Failed to load database config.\n"); goto onerror; } if (load_ccnet_database_config (session) < 0) { seaf_warning ("Failed to load ccnet database config.\n"); goto onerror; } session->cfg_mgr = seaf_cfg_manager_new (session); if (!session->cfg_mgr) goto onerror; session->fs_mgr = seaf_fs_manager_new (session, abs_seafile_dir); if (!session->fs_mgr) goto onerror; session->block_mgr = seaf_block_manager_new (session, abs_seafile_dir); if (!session->block_mgr) goto onerror; session->commit_mgr = seaf_commit_manager_new (session); if (!session->commit_mgr) goto onerror; session->repo_mgr = seaf_repo_manager_new (session); if (!session->repo_mgr) goto onerror; session->branch_mgr = seaf_branch_manager_new (session); if (!session->branch_mgr) goto onerror; session->share_mgr = seaf_share_manager_new (session); if (!session->share_mgr) goto onerror; session->web_at_mgr = seaf_web_at_manager_new (session); if (!session->web_at_mgr) goto onerror; session->passwd_mgr = seaf_passwd_manager_new (session); if (!session->passwd_mgr) goto onerror; session->quota_mgr = seaf_quota_manager_new (session); if (!session->quota_mgr) goto onerror; session->copy_mgr = seaf_copy_manager_new (session); if (!session->copy_mgr) goto onerror; session->job_mgr = ccnet_job_manager_new 
(DEFAULT_THREAD_POOL_SIZE); session->size_sched = size_scheduler_new (session); session->mq_mgr = seaf_mq_manager_new (); if (!session->mq_mgr) goto onerror; #ifdef HAVE_EVHTP session->http_server = seaf_http_server_new (session); if (!session->http_server) goto onerror; session->zip_download_mgr = zip_download_mgr_new (); if (!session->zip_download_mgr) goto onerror; #endif session->index_blocks_mgr = index_blocks_mgr_new (session); if (!session->index_blocks_mgr) goto onerror; session->user_mgr = ccnet_user_manager_new (session); if (!session->user_mgr) goto onerror; session->group_mgr = ccnet_group_manager_new (session); if (!session->group_mgr) goto onerror; session->org_mgr = ccnet_org_manager_new (session); if (!session->org_mgr) goto onerror; if (session->notif_url) { session->notif_mgr = seaf_notif_manager_new (session, session->notif_url); if (!session->notif_mgr) { goto onerror; } } session->metric_mgr = seaf_metric_manager_new (session); if (!session->metric_mgr) goto onerror; return session; onerror: g_free (config_file_path); free (abs_seafile_dir); free (abs_ccnet_dir); g_free (tmp_file_dir); g_free (session); return NULL; } SeafileSession * seafile_repair_session_new(const char *central_config_dir, const char *seafile_dir, const char *ccnet_dir) { char *abs_central_config_dir = NULL; char *abs_seafile_dir; char *abs_ccnet_dir = NULL; char *tmp_file_dir; char *config_file_path; GKeyFile *config; SeafileSession *session = NULL; gboolean notif_enabled = FALSE; int notif_port = 8083; gboolean cluster_mode; gboolean use_block_cache; int block_cache_size_limit; char **block_cache_file_types; gint64 repo_file_number_limit = -1; abs_ccnet_dir = ccnet_expand_path (ccnet_dir); abs_seafile_dir = ccnet_expand_path (seafile_dir); tmp_file_dir = g_build_filename (abs_seafile_dir, "tmpfiles", NULL); if (central_config_dir) { abs_central_config_dir = ccnet_expand_path (central_config_dir); } config_file_path = g_build_filename( abs_central_config_dir ? 
abs_central_config_dir : abs_seafile_dir, "seafile.conf", NULL); GError *error = NULL; config = g_key_file_new (); if (!g_key_file_load_from_file (config, config_file_path, G_KEY_FILE_NONE, &error)) { seaf_warning ("Failed to load config file.\n"); g_key_file_free (config); g_free (config_file_path); goto onerror; } g_free (config_file_path); session = g_new0(SeafileSession, 1); session->seaf_dir = abs_seafile_dir; session->ccnet_dir = abs_ccnet_dir; session->tmp_file_dir = tmp_file_dir; session->config = config; session->is_repair = TRUE; if (load_database_config (session) < 0) { seaf_warning ("Failed to load database config.\n"); goto onerror; } if (load_ccnet_database_config (session) < 0) { seaf_warning ("Failed to load ccnet database config.\n"); goto onerror; } session->fs_mgr = seaf_fs_manager_new (session, abs_seafile_dir); if (!session->fs_mgr) goto onerror; session->block_mgr = seaf_block_manager_new (session, abs_seafile_dir); if (!session->block_mgr) goto onerror; session->commit_mgr = seaf_commit_manager_new (session); if (!session->commit_mgr) goto onerror; session->repo_mgr = seaf_repo_manager_new (session); if (!session->repo_mgr) goto onerror; session->branch_mgr = seaf_branch_manager_new (session); if (!session->branch_mgr) goto onerror; session->job_mgr = ccnet_job_manager_new (DEFAULT_THREAD_POOL_SIZE); session->size_sched = size_scheduler_new (session); return session; onerror: free (abs_seafile_dir); free (abs_ccnet_dir); g_free (tmp_file_dir); g_free (session); return NULL; } int seafile_session_init (SeafileSession *session) { if (seaf_commit_manager_init (session->commit_mgr) < 0) return -1; if (seaf_fs_manager_init (session->fs_mgr) < 0) return -1; if (seaf_branch_manager_init (session->branch_mgr) < 0) { seaf_warning ("Failed to init branch manager.\n"); return -1; } if (seaf_repo_manager_init (session->repo_mgr) < 0) { seaf_warning ("Failed to init repo manager.\n"); return -1; } if (seaf_quota_manager_init (session->quota_mgr) < 0) { 
        seaf_warning ("Failed to init quota manager.\n");
        return -1;
    }

    if (ccnet_user_manager_prepare (session->user_mgr) < 0) {
        seaf_warning ("Failed to init user manager.\n");
        return -1;
    }

    if (ccnet_group_manager_prepare (session->group_mgr) < 0) {
        seaf_warning ("Failed to init group manager.\n");
        return -1;
    }

    if (ccnet_org_manager_prepare (session->org_mgr) < 0) {
        seaf_warning ("Failed to init org manager.\n");
        return -1;
    }

    /* Config-manager tables are only created when table auto-creation is
     * enabled, with PGSQL as an unconditional exception. */
    if ((session->create_tables || seaf_db_type(session->db) == SEAF_DB_TYPE_PGSQL)
        && seaf_cfg_manager_init (session->cfg_mgr) < 0) {
        seaf_warning ("Failed to init config manager.\n");
        return -1;
    }

    return 0;
}

/* Start the session's background managers in order, and — unless the go
 * fileserver is enabled — the built-in HTTP server.  Returns 0 on success,
 * -1 on the first failure (earlier managers stay started). */
int
seafile_session_start (SeafileSession *session)
{
    if (seaf_share_manager_start (session->share_mgr) < 0) {
        seaf_warning ("Failed to start share manager.\n");
        return -1;
    }

    if (seaf_web_at_manager_start (session->web_at_mgr) < 0) {
        seaf_warning ("Failed to start web access check manager.\n");
        return -1;
    }

    if (seaf_passwd_manager_start (session->passwd_mgr) < 0) {
        seaf_warning ("Failed to start password manager.\n");
        return -1;
    }

    if (size_scheduler_start (session->size_sched) < 0) {
        seaf_warning ("Failed to start size scheduler.\n");
        return -1;
    }

    if (seaf_copy_manager_start (session->copy_mgr) < 0) {
        seaf_warning ("Failed to start copy manager.\n");
        return -1;
    }

    if (!session->go_fileserver) {
#ifdef HAVE_EVHTP
        if (seaf_http_server_start (session->http_server) < 0) {
            seaf_warning ("Failed to start http server thread.\n");
            return -1;
        }
#else
        /* Built without evhtp: the C http server is unavailable, so running
         * without the go fileserver is a fatal configuration. */
        seaf_warning ("Failed to start http server thread, please use go fileserver.\n");
        return -1;
#endif
    }

    if (seaf_metric_manager_start (session->metric_mgr) < 0) {
        seaf_warning ("Failed to start metric manager.\n");
        return -1;
    }

    return 0;
}

/* Query the SystemInfo table for the configured default-repo id.
 * Returns the value from seaf_db_get_string (presumably newly allocated;
 * NULL when no row matches — TODO confirm against seaf_db_get_string). */
char *
get_system_default_repo_id (SeafileSession *session)
{
    char *sql = "SELECT info_value FROM SystemInfo WHERE info_key='default_repo_id'";
    return seaf_db_get_string (session->db, sql);
}

/* Record repo_id as the system default library in the SystemInfo table. */
int
set_system_default_repo_id (SeafileSession *session, const char *repo_id)
{
    char
sql[256]; snprintf (sql, sizeof(sql), "INSERT INTO SystemInfo (info_key, info_value) VALUES ('default_repo_id', '%s')", repo_id); return seaf_db_query (session->db, sql); } static int del_system_default_repo_id (SeafileSession *session) { const char *sql = "DELETE FROM SystemInfo WHERE info_key='default_repo_id'"; return seaf_db_query (session->db, sql); } #define DEFAULT_TEMPLATE_DIR "library-template" static void copy_template_files_recursive (SeafileSession *session, const char *repo_id, const char *repo_dir_path, const char *dir_path) { GDir *dir; const char *name; char *sub_path, *repo_sub_path; SeafStat st; GError *error = NULL; int rc; dir = g_dir_open (dir_path, 0, &error); if (!dir) { seaf_warning ("Failed to open template dir %s: %s.\n", dir_path, error->message); return; } while ((name = g_dir_read_name(dir)) != NULL) { sub_path = g_build_filename (dir_path, name, NULL); if (seaf_stat (sub_path, &st) < 0) { seaf_warning ("Failed to stat %s: %s.\n", sub_path, strerror(errno)); g_free (sub_path); continue; } if (S_ISREG(st.st_mode)) { rc = seaf_repo_manager_post_file (session->repo_mgr, repo_id, sub_path, repo_dir_path, name, "System", NULL); if (rc < 0) seaf_warning ("Failed to add template file %s.\n", sub_path); } else if (S_ISDIR(st.st_mode)) { rc = seaf_repo_manager_post_dir (session->repo_mgr, repo_id, repo_dir_path, name, "System", NULL); if (rc < 0) { seaf_warning ("Failed to add template dir %s.\n", sub_path); g_free (sub_path); continue; } repo_sub_path = g_build_path ("/", repo_dir_path, name, NULL); copy_template_files_recursive (session, repo_id, repo_sub_path, sub_path); g_free (repo_sub_path); } g_free (sub_path); } g_dir_close (dir); } static void * create_system_default_repo (void *data) { SeafileSession *session = data; char *repo_id; char *template_path; /* If default repo is not set or doesn't exist, create a new one. 
*/ repo_id = get_system_default_repo_id (session); if (repo_id != NULL) { SeafRepo *repo; repo = seaf_repo_manager_get_repo (session->repo_mgr, repo_id); if (!repo) { seaf_warning ("Failed to get system default repo. Create a new one.\n"); del_system_default_repo_id (session); seaf_repo_manager_del_repo (session->repo_mgr, repo_id, NULL); g_free (repo_id); } else { seaf_repo_unref (repo); g_free (repo_id); return data; } } repo_id = seaf_repo_manager_create_new_repo (session->repo_mgr, "My Library Template", "Template for creating 'My Library' for users", "System", NULL, -1, NULL, NULL, NULL); if (!repo_id) { seaf_warning ("Failed to create system default repo.\n"); return data; } set_system_default_repo_id (session, repo_id); template_path = g_build_filename (session->seaf_dir, DEFAULT_TEMPLATE_DIR, NULL); copy_template_files_recursive (session, repo_id, "/", template_path); g_free (repo_id); g_free (template_path); return data; } void schedule_create_system_default_repo (SeafileSession *session) { int db_type = seaf_db_type (session->db); char *sql; if (db_type == SEAF_DB_TYPE_MYSQL) sql = "CREATE TABLE IF NOT EXISTS SystemInfo " "(id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, " "info_key VARCHAR(256), info_value VARCHAR(1024))"; else sql = "CREATE TABLE IF NOT EXISTS SystemInfo( " "info_key VARCHAR(256), info_value VARCHAR(1024))"; if ((session->create_tables || db_type == SEAF_DB_TYPE_PGSQL) && seaf_db_query (session->db, sql) < 0) return; ccnet_job_manager_schedule_job (session->job_mgr, create_system_default_repo, NULL, session); } ================================================ FILE: server/seafile-session.h ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef SEAFILE_SESSION_H #define SEAFILE_SESSION_H #include #include "block-mgr.h" #include "fs-mgr.h" #include "commit-mgr.h" #include "branch-mgr.h" #include "repo-mgr.h" #include "db.h" #include "seaf-db.h" #include 
"mq-mgr.h" #include "user-mgr.h" #include "group-mgr.h" #include "org-mgr.h" #include "share-mgr.h" #include "web-accesstoken-mgr.h" #include "passwd-mgr.h" #include "quota-mgr.h" #include "size-sched.h" #include "copy-mgr.h" #include "config-mgr.h" #include "http-server.h" #include "zip-download-mgr.h" #include "index-blocks-mgr.h" #include "notif-mgr.h" #include "http-tx-mgr.h" #include "obj-cache.h" #include "metric-mgr.h" #include struct _CcnetClient; typedef struct _SeafileSession SeafileSession; struct _SeafileSession { char *central_config_dir; char *seaf_dir; char *ccnet_dir; char *tmp_file_dir; /* Config that's only loaded on start */ GKeyFile *config; SeafDB *db; CcnetDB *ccnet_db; char *seahub_pk; char *seahub_url; ConnectionPool *seahub_conn_pool; SeafBlockManager *block_mgr; SeafFSManager *fs_mgr; SeafCommitManager *commit_mgr; SeafBranchManager *branch_mgr; SeafRepoManager *repo_mgr; SeafShareManager *share_mgr; SeafPasswdManager *passwd_mgr; SeafQuotaManager *quota_mgr; SeafCopyManager *copy_mgr; SeafCfgManager *cfg_mgr; CcnetUserManager *user_mgr; CcnetGroupManager *group_mgr; CcnetOrgManager *org_mgr; SeafWebAccessTokenManager *web_at_mgr; SeafMqManager *mq_mgr; CcnetJobManager *job_mgr; SizeScheduler *size_sched; int cloud_mode; #ifdef HAVE_EVHTP HttpServerStruct *http_server; ZipDownloadMgr *zip_download_mgr; #endif IndexBlksMgr *index_blocks_mgr; gboolean create_tables; gboolean ccnet_create_tables; gboolean go_fileserver; int web_token_expire_time; int max_index_processing_threads; gint64 fixed_block_size; int max_indexing_threads; gint64 max_upload_size; // For notification server NotifManager *notif_mgr; char *notif_server_private_key; char *notif_url; // For metric SeafMetricManager *metric_mgr; char *node_name; ObjCache *obj_cache; gboolean log_to_stdout; gboolean is_repair; }; extern SeafileSession *seaf; SeafileSession * seafile_session_new(const char *central_config_dir, const char *seafile_dir, const char *ccnet_dir); SeafileSession * 
seafile_repair_session_new(const char *central_config_dir, const char *seafile_dir, const char *ccnet_dir); int seafile_session_init (SeafileSession *session); int seafile_session_start (SeafileSession *session); char * seafile_session_get_tmp_file_path (SeafileSession *session, const char *basename, char path[]); void schedule_create_system_default_repo (SeafileSession *session); char * get_system_default_repo_id (SeafileSession *session); int set_system_default_repo_id (SeafileSession *session, const char *repo_id); #endif /* SEAFILE_H */ ================================================ FILE: server/share-mgr.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include "common.h" #include "utils.h" #include "log.h" #include "seafile-session.h" #include "share-mgr.h" #include "seaf-db.h" #include "log.h" #include "seafile-error.h" SeafShareManager * seaf_share_manager_new (SeafileSession *seaf) { SeafShareManager *mgr = g_new0 (SeafShareManager, 1); mgr->seaf = seaf; return mgr; } int seaf_share_manager_start (SeafShareManager *mgr) { if (!mgr->seaf->create_tables && seaf_db_type (mgr->seaf->db) != SEAF_DB_TYPE_PGSQL) return 0; SeafDB *db = mgr->seaf->db; const char *sql; int db_type = seaf_db_type (db); if (db_type == SEAF_DB_TYPE_MYSQL) { sql = "CREATE TABLE IF NOT EXISTS SharedRepo " "(id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT," "repo_id CHAR(37) , from_email VARCHAR(255), to_email VARCHAR(255), " "permission CHAR(15), INDEX (repo_id), " "INDEX(from_email), INDEX(to_email)) ENGINE=INNODB"; if (seaf_db_query (db, sql) < 0) return -1; } else if (db_type == SEAF_DB_TYPE_SQLITE) { sql = "CREATE TABLE IF NOT EXISTS SharedRepo " "(repo_id CHAR(37) , from_email VARCHAR(255), to_email VARCHAR(255), " "permission CHAR(15))"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE INDEX IF NOT EXISTS RepoIdIndex on SharedRepo (repo_id)"; if (seaf_db_query (db, sql) < 0) return -1; sql = 
"CREATE INDEX IF NOT EXISTS FromEmailIndex on SharedRepo (from_email)"; if (seaf_db_query (db, sql) < 0) return -1; sql = "CREATE INDEX IF NOT EXISTS ToEmailIndex on SharedRepo (to_email)"; if (seaf_db_query (db, sql) < 0) return -1; } /* else if (db_type == SEAF_DB_TYPE_PGSQL) { */ /* sql = "CREATE TABLE IF NOT EXISTS SharedRepo " */ /* "(repo_id CHAR(36) , from_email VARCHAR(255), to_email VARCHAR(255), " */ /* "permission VARCHAR(15))"; */ /* if (seaf_db_query (db, sql) < 0) */ /* return -1; */ /* if (!pgsql_index_exists (db, "sharedrepo_repoid_idx")) { */ /* sql = "CREATE INDEX sharedrepo_repoid_idx ON SharedRepo (repo_id)"; */ /* if (seaf_db_query (db, sql) < 0) */ /* return -1; */ /* } */ /* if (!pgsql_index_exists (db, "sharedrepo_from_email_idx")) { */ /* sql = "CREATE INDEX sharedrepo_from_email_idx ON SharedRepo (from_email)"; */ /* if (seaf_db_query (db, sql) < 0) */ /* return -1; */ /* } */ /* if (!pgsql_index_exists (db, "sharedrepo_to_email_idx")) { */ /* sql = "CREATE INDEX sharedrepo_to_email_idx ON SharedRepo (to_email)"; */ /* if (seaf_db_query (db, sql) < 0) */ /* return -1; */ /* } */ /* } */ return 0; } int seaf_share_manager_add_share (SeafShareManager *mgr, const char *repo_id, const char *from_email, const char *to_email, const char *permission) { gboolean db_err = FALSE; int ret = 0; char *from_email_l = g_ascii_strdown (from_email, -1); char *to_email_l = g_ascii_strdown (to_email, -1); if (seaf_db_statement_exists (mgr->seaf->db, "SELECT repo_id from SharedRepo " "WHERE repo_id=? AND " "from_email=? 
AND to_email=?",
                                  &db_err, 3, "string", repo_id,
                                  "string", from_email_l,
                                  "string", to_email_l))
        /* Share already exists: treat as success (ret stays 0). */
        goto out;

    if (seaf_db_statement_query (mgr->seaf->db,
                                 "INSERT INTO SharedRepo (repo_id, from_email, "
                                 "to_email, permission) VALUES (?, ?, ?, ?)",
                                 4, "string", repo_id, "string", from_email_l,
                                 "string", to_email_l, "string", permission) < 0) {
        ret = -1;
        goto out;
    }

out:
    g_free (from_email_l);
    g_free (to_email_l);
    return ret;
}

/* Update the permission of shares whose repo is the virtual repo rooted at
 * `path` inside origin repo `repo_id`, restricted to the given from/to pair.
 * Email addresses are compared lower-cased.  Returns the result of
 * seaf_db_statement_query. */
int
seaf_share_manager_set_subdir_perm_by_path (SeafShareManager *mgr,
                                            const char *repo_id,
                                            const char *from_email,
                                            const char *to_email,
                                            const char *permission,
                                            const char *path)
{
    char *sql;
    int ret;
    char *from_email_l = g_ascii_strdown (from_email, -1);
    char *to_email_l = g_ascii_strdown (to_email, -1);

    sql = "UPDATE SharedRepo SET permission=? WHERE repo_id IN "
          "(SELECT repo_id FROM VirtualRepo WHERE origin_repo=? AND path=?) "
          "AND from_email=? AND to_email=?";
    ret = seaf_db_statement_query (mgr->seaf->db, sql,
                                   5, "string", permission,
                                   "string", repo_id, "string", path,
                                   "string", from_email_l,
                                   "string", to_email_l);

    g_free (from_email_l);
    g_free (to_email_l);
    return ret;
}

/* Set the permission of a direct user-to-user share.  Emails are compared
 * lower-cased, matching how seaf_share_manager_add_share stores them. */
int
seaf_share_manager_set_permission (SeafShareManager *mgr, const char *repo_id,
                                   const char *from_email, const char *to_email,
                                   const char *permission)
{
    char *sql;
    int ret;
    char *from_email_l = g_ascii_strdown (from_email, -1);
    char *to_email_l = g_ascii_strdown (to_email, -1);

    sql = "UPDATE SharedRepo SET permission=? WHERE "
          "repo_id=? AND from_email=?
AND to_email=?"; ret = seaf_db_statement_query (mgr->seaf->db, sql, 4, "string", permission, "string", repo_id, "string", from_email_l, "string", to_email_l); g_free (from_email_l); g_free (to_email_l); return ret; } static gboolean collect_repos (SeafDBRow *row, void *data) { GList **p_repos = data; const char *repo_id; const char *vrepo_id; const char *email; const char *permission; const char *commit_id; gint64 size; SeafileRepo *repo; repo_id = seaf_db_row_get_column_text (row, 0); vrepo_id = seaf_db_row_get_column_text (row, 1); email = seaf_db_row_get_column_text (row, 2); permission = seaf_db_row_get_column_text (row, 3); commit_id = seaf_db_row_get_column_text (row, 4); size = seaf_db_row_get_column_int64 (row, 5); const char *repo_name = seaf_db_row_get_column_text (row, 8); gint64 update_time = seaf_db_row_get_column_int64 (row, 9); int version = seaf_db_row_get_column_int (row, 10); gboolean is_encrypted = seaf_db_row_get_column_int (row, 11) ? TRUE : FALSE; const char *last_modifier = seaf_db_row_get_column_text (row, 12); int status = seaf_db_row_get_column_int (row, 13); const char *type = seaf_db_row_get_column_text (row, 14); const char *origin_repo_name = seaf_db_row_get_column_text (row, 15); char *email_l = g_ascii_strdown (email, -1); repo = g_object_new (SEAFILE_TYPE_REPO, "share_type", "personal", "repo_id", repo_id, "id", repo_id, "head_cmmt_id", commit_id, "user", email_l, "permission", permission, "is_virtual", (vrepo_id != NULL), "size", size, "status", status, NULL); g_free (email_l); if (repo) { if (vrepo_id) { const char *origin_repo_id = seaf_db_row_get_column_text (row, 6); const char *origin_path = seaf_db_row_get_column_text (row, 7); g_object_set (repo, "store_id", origin_repo_id, "origin_repo_id", origin_repo_id, "origin_repo_name", origin_repo_name, "origin_path", origin_path, NULL); } else { g_object_set (repo, "store_id", repo_id, NULL); } if (repo_name) { g_object_set (repo, "name", repo_name, "repo_name", repo_name, 
"last_modify", update_time, "last_modified", update_time, "version", version, "encrypted", is_encrypted, "last_modifier", last_modifier, NULL); } if (type) { g_object_set (repo, "repo_type", type, NULL); } *p_repos = g_list_prepend (*p_repos, repo); } return TRUE; } static void seaf_fill_repo_commit_if_not_in_db (GList **repos) { char *repo_name = NULL; char *last_modifier = NULL; char *repo_id = NULL; char *commit_id = NULL; SeafileRepo *repo = NULL; GList *p = NULL; for (p = *repos; p;) { repo = p->data; g_object_get (repo, "name", &repo_name, NULL); g_object_get (repo, "last_modifier", &last_modifier, NULL); if (!repo_name || !last_modifier) { g_object_get (repo, "repo_id", &repo_id, "head_cmmt_id", &commit_id, NULL); SeafCommit *commit = seaf_commit_manager_get_commit_compatible (seaf->commit_mgr, repo_id, commit_id); if (!commit) { seaf_warning ("Commit %s:%s is missing\n", repo_id, commit_id); GList *next = p->next; g_object_unref (repo); *repos = g_list_delete_link (*repos, p); p = next; if (repo_name) g_free (repo_name); if (last_modifier) g_free (last_modifier); continue; } else { g_object_set (repo, "name", commit->repo_name, "repo_name", commit->repo_name, "last_modify", commit->ctime, "last_modified", commit->ctime, "version", commit->version, "encrypted", commit->encrypted, "last_modifier", commit->creator_name, NULL); /* Set to database */ set_repo_commit_to_db (repo_id, commit->repo_name, commit->ctime, commit->version, commit->encrypted, commit->creator_name); seaf_commit_unref (commit); } g_free (repo_id); g_free (commit_id); } if (repo_name) g_free (repo_name); if (last_modifier) g_free (last_modifier); p = p->next; } } GList* seaf_share_manager_list_share_repos (SeafShareManager *mgr, const char *email, const char *type, int start, int limit, gboolean *db_err) { GList *ret = NULL, *p; char *sql; if (start == -1 && limit == -1) { if (g_strcmp0 (type, "from_email") == 0) { sql = "SELECT sh.repo_id, v.repo_id, " "to_email, permission, commit_id, 
s.size, " "v.origin_repo, v.path, i.name, " "i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, " "i2.name FROM " "SharedRepo sh LEFT JOIN VirtualRepo v ON " "sh.repo_id=v.repo_id " "LEFT JOIN RepoSize s ON sh.repo_id = s.repo_id " "LEFT JOIN RepoInfo i ON sh.repo_id = i.repo_id " "LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id, Branch b " "WHERE from_email=? AND " "sh.repo_id = b.repo_id AND " "b.name = 'master' " "ORDER BY i.update_time DESC, sh.repo_id"; } else if (g_strcmp0 (type, "to_email") == 0) { sql = "SELECT sh.repo_id, v.repo_id, " "from_email, permission, commit_id, s.size, " "v.origin_repo, v.path, i.name, " "i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, " "i2.name FROM " "SharedRepo sh LEFT JOIN VirtualRepo v ON " "sh.repo_id=v.repo_id " "LEFT JOIN RepoSize s ON sh.repo_id = s.repo_id " "LEFT JOIN RepoInfo i ON sh.repo_id = i.repo_id " "LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id, Branch b " "WHERE to_email=? 
AND " "sh.repo_id = b.repo_id AND " "b.name = 'master' " "ORDER BY i.update_time DESC, sh.repo_id"; } else { /* should never reach here */ seaf_warning ("[share mgr] Wrong column type"); return NULL; } if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, collect_repos, &ret, 1, "string", email) < 0) { seaf_warning ("[share mgr] DB error when get shared repo id and email " "for %s.\n", email); for (p = ret; p; p = p->next) g_object_unref (p->data); g_list_free (ret); if (db_err) *db_err = TRUE; return NULL; } } else { if (g_strcmp0 (type, "from_email") == 0) { sql = "SELECT sh.repo_id, v.repo_id, " "to_email, permission, commit_id, s.size, " "v.origin_repo, v.path, i.name, " "i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, " "i2.name FROM " "SharedRepo sh LEFT JOIN VirtualRepo v ON " "sh.repo_id=v.repo_id " "LEFT JOIN RepoSize s ON sh.repo_id = s.repo_id " "LEFT JOIN RepoInfo i ON sh.repo_id = i.repo_id " "LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id, Branch b " "WHERE from_email=? " "AND sh.repo_id = b.repo_id " "AND b.name = 'master' " "ORDER BY i.update_time DESC, sh.repo_id " "LIMIT ? OFFSET ?"; } else if (g_strcmp0 (type, "to_email") == 0) { sql = "SELECT sh.repo_id, v.repo_id, " "from_email, permission, commit_id, s.size, " "v.origin_repo, v.path, i.name, " "i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, " "i2.name FROM " "SharedRepo sh LEFT JOIN VirtualRepo v ON " "sh.repo_id=v.repo_id " "LEFT JOIN RepoSize s ON sh.repo_id = s.repo_id " "LEFT JOIN RepoInfo i ON sh.repo_id = i.repo_id " "LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id, Branch b " "WHERE to_email=? " "AND sh.repo_id = b.repo_id " "AND b.name = 'master' " "ORDER BY i.update_time DESC, sh.repo_id " "LIMIT ? 
OFFSET ?"; } else { /* should never reach here */ seaf_warning ("[share mgr] Wrong column type"); return NULL; } if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, collect_repos, &ret, 3, "string", email, "int", limit, "int", start) < 0) { seaf_warning ("[share mgr] DB error when get shared repo id and email " "for %s.\n", email); for (p = ret; p; p = p->next) g_object_unref (p->data); g_list_free (ret); if (db_err) *db_err = TRUE; return NULL; } } seaf_fill_repo_commit_if_not_in_db (&ret); return g_list_reverse (ret); } static gboolean collect_shared_to (SeafDBRow *row, void *data) { GList **plist = data; const char *to_email; to_email = seaf_db_row_get_column_text (row, 0); *plist = g_list_prepend (*plist, g_ascii_strdown(to_email, -1)); return TRUE; } GList * seaf_share_manager_list_shared_to (SeafShareManager *mgr, const char *owner, const char *repo_id) { char *sql; GList *ret = NULL; sql = "SELECT to_email FROM SharedRepo WHERE " "from_email=? AND repo_id=?"; if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, collect_shared_to, &ret, 2, "string", owner, "string", repo_id) < 0) { seaf_warning ("[share mgr] DB error when list shared to.\n"); string_list_free (ret); return NULL; } return ret; } static gboolean collect_repo_shared_to (SeafDBRow *row, void *data) { GList **shared_to = data; const char *to_email = seaf_db_row_get_column_text (row, 0); char *email_down = g_ascii_strdown(to_email, -1); const char *perm = seaf_db_row_get_column_text (row, 1); const char *repo_id = seaf_db_row_get_column_text (row, 2); SeafileSharedUser *uobj = g_object_new (SEAFILE_TYPE_SHARED_USER, "repo_id", repo_id, "user", email_down, "perm", perm, NULL); *shared_to = g_list_prepend (*shared_to, uobj); g_free (email_down); return TRUE; } GList * seaf_share_manager_list_repo_shared_to (SeafShareManager *mgr, const char *from_email, const char *repo_id, GError **error) { GList *shared_to = NULL; char *sql = "SELECT to_email, permission, repo_id FROM SharedRepo WHERE " 
"from_email=? AND repo_id=?"; int ret = seaf_db_statement_foreach_row (mgr->seaf->db, sql, collect_repo_shared_to, &shared_to, 2, "string", from_email, "string", repo_id); if (ret < 0) { seaf_warning ("Failed to list repo %s shared to from db.\n", repo_id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to list repo shared to from db"); while (shared_to) { g_object_unref (shared_to->data); shared_to = g_list_delete_link (shared_to, shared_to); } return NULL; } return shared_to; } static gboolean collect_repo_shared_group (SeafDBRow *row, void *data) { GList **shared_group = data; int group_id = seaf_db_row_get_column_int (row, 0); const char *perm = seaf_db_row_get_column_text (row, 1); const char *repo_id = seaf_db_row_get_column_text (row, 2); SeafileSharedGroup *gobj = g_object_new (SEAFILE_TYPE_SHARED_GROUP, "repo_id", repo_id, "group_id", group_id, "perm", perm, NULL); *shared_group = g_list_prepend (*shared_group, gobj); return TRUE; } GList * seaf_share_manager_list_repo_shared_group (SeafShareManager *mgr, const char *from_email, const char *repo_id, GError **error) { GList *shared_group = NULL; char *sql = "SELECT group_id, permission, repo_id FROM RepoGroup WHERE " "user_name=? 
AND repo_id=?"; int ret = seaf_db_statement_foreach_row (mgr->seaf->db, sql, collect_repo_shared_group, &shared_group, 2, "string", from_email, "string", repo_id); if (ret < 0) { seaf_warning ("Failed to list repo %s shared group from db.\n", repo_id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to list repo shared group from db"); while (shared_group) { g_object_unref (shared_group->data); shared_group = g_list_delete_link (shared_group, shared_group); } return NULL; } return shared_group; } static gboolean get_shared_dirs_to_user (SeafDBRow *row, void *data) { GHashTable *dirs = data; const char *path = seaf_db_row_get_column_text (row, 0); const char *perm = seaf_db_row_get_column_text (row, 1); g_hash_table_replace (dirs, g_strdup (path), g_strdup (perm)); return TRUE; } static gboolean get_shared_dirs_to_group (SeafDBRow *row, void *data) { GHashTable *dirs = data; const char *path = seaf_db_row_get_column_text (row, 0); const char *perm = seaf_db_row_get_column_text (row, 1); char *prev_perm = g_hash_table_lookup (dirs, path); if (g_strcmp0 (perm, prev_perm) != 0 && (prev_perm == NULL || g_strcmp0 (prev_perm, "r") == 0)) { g_hash_table_replace (dirs, g_strdup (path), g_strdup (perm)); } return TRUE; } // Conver group id list to comma separated str // [1, 2, 3] -> 1,2,3 static GString * convert_group_list_to_str (GList *groups) { GList *iter = groups; CcnetGroup *group; int group_id; GString *group_ids = g_string_new (""); for (; iter; iter = iter->next) { group = iter->data; g_object_get (group, "id", &group_id, NULL); g_string_append_printf (group_ids, "%d,", group_id); } group_ids = g_string_erase (group_ids, group_ids->len - 1, 1); return group_ids; } GHashTable * seaf_share_manager_get_shared_dirs_to_user (SeafShareManager *mgr, const char *orig_repo_id, const char *to_email) { GHashTable *dirs; char *sql; dirs = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, g_free); sql = "SELECT v.path, s.permission FROM SharedRepo s, 
VirtualRepo v WHERE " "s.repo_id = v.repo_id AND s.to_email = ? AND v.origin_repo = ?"; int ret = seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_shared_dirs_to_user, dirs, 2, "string", to_email, "string", orig_repo_id); if (ret < 0) { seaf_warning ("Failed to get all shared folder perms " "in parent repo %.8s for user %s.\n", orig_repo_id, to_email); g_hash_table_destroy (dirs); return NULL; } return dirs; } GHashTable * seaf_share_manager_get_shared_dirs_to_group (SeafShareManager *mgr, const char *orig_repo_id, GList *groups) { GHashTable *dirs; GString *group_ids; char *sql; dirs = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, g_free); group_ids = convert_group_list_to_str (groups); sql = g_strdup_printf ("SELECT v.path, s.permission " "FROM RepoGroup s, VirtualRepo v WHERE " "s.repo_id = v.repo_id AND v.origin_repo = ? " "AND s.group_id in (%s)", group_ids->str); int ret = seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_shared_dirs_to_group, dirs, 1, "string", orig_repo_id); g_free (sql); g_string_free (group_ids, TRUE); if (ret < 0) { seaf_warning ("Failed to get all shared folder perm from parent repo %.8s " "to all user groups.\n", orig_repo_id); g_hash_table_destroy (dirs); return NULL; } return dirs; } int seaf_share_manager_remove_share (SeafShareManager *mgr, const char *repo_id, const char *from_email, const char *to_email) { if (seaf_db_statement_query (mgr->seaf->db, "DELETE FROM SharedRepo WHERE repo_id = ? AND from_email =" " ? AND to_email = ?", 3, "string", repo_id, "string", from_email, "string", to_email) < 0) return -1; return 0; } int seaf_share_manager_unshare_subdir (SeafShareManager* mgr, const char *orig_repo_id, const char *path, const char *from_email, const char *to_email) { if (seaf_db_statement_query (mgr->seaf->db, "DELETE FROM SharedRepo WHERE " "from_email = ? AND to_email = ? " "AND repo_id IN " "(SELECT repo_id FROM VirtualRepo WHERE " "origin_repo = ? 
AND path = ?)", 4, "string", from_email, "string", to_email, "string", orig_repo_id, "string", path) < 0) return -1; return 0; } int seaf_share_manager_remove_repo (SeafShareManager *mgr, const char *repo_id) { if (seaf_db_statement_query (mgr->seaf->db, "DELETE FROM SharedRepo WHERE repo_id = ?", 1, "string", repo_id) < 0) return -1; return 0; } char * seaf_share_manager_check_permission (SeafShareManager *mgr, const char *repo_id, const char *email) { char *sql; sql = "SELECT permission FROM SharedRepo WHERE repo_id=? AND to_email=?"; return seaf_db_statement_get_string (mgr->seaf->db, sql, 2, "string", repo_id, "string", email); } static gboolean get_shared_sub_dirs (SeafDBRow *row, void *data) { GHashTable *sub_dirs = data; int dummy; const char *sub_dir = seaf_db_row_get_column_text (row, 0); g_hash_table_replace (sub_dirs, g_strdup(sub_dir), &dummy); return TRUE; } GHashTable * seaf_share_manager_get_shared_sub_dirs (SeafShareManager *mgr, const char *repo_id, const char *path) { GHashTable *sub_dirs = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL); char *pattern; if (strcmp (path, "/") == 0) { pattern = g_strdup_printf("%s%%", path); } else { pattern = g_strdup_printf ("%s/%%", path); } int ret = seaf_db_statement_foreach_row (mgr->seaf->db, "SELECT v.path FROM VirtualRepo v, SharedRepo s " "WHERE v.repo_id = s.repo_id and " "v.origin_repo = ? AND v.path LIKE ?", get_shared_sub_dirs, sub_dirs, 2, "string", repo_id, "string", pattern); if (ret < 0) { g_free (pattern); seaf_warning ("Failed to get shared sub dirs from db.\n"); g_hash_table_destroy (sub_dirs); return NULL; } ret = seaf_db_statement_foreach_row (mgr->seaf->db, "SELECT v.path FROM VirtualRepo v, RepoGroup r " "WHERE v.repo_id = r.repo_id and " "v.origin_repo = ? 
AND v.path LIKE ?", get_shared_sub_dirs, sub_dirs, 2, "string", repo_id, "string", pattern); g_free (pattern); if (ret < 0) { seaf_warning ("Failed to get shared sub dirs from db.\n"); g_hash_table_destroy (sub_dirs); return NULL; } return sub_dirs; } int seaf_share_manager_is_repo_shared (SeafShareManager *mgr, const char *repo_id) { gboolean ret; gboolean db_err = FALSE; ret = seaf_db_statement_exists (mgr->seaf->db, "SELECT repo_id FROM SharedRepo WHERE " "repo_id = ?", &db_err, 1, "string", repo_id); if (db_err) { seaf_warning ("DB error when check repo exist in SharedRepo.\n"); return -1; } if (!ret) { ret = seaf_db_statement_exists (mgr->seaf->db, "SELECT repo_id FROM RepoGroup WHERE " "repo_id = ?", &db_err, 1, "string", repo_id); if (db_err) { seaf_warning ("DB error when check repo exist in RepoGroup.\n"); return -1; } } return ret; } GObject * seaf_get_shared_repo_by_path (SeafRepoManager *mgr, const char *repo_id, const char *path, const char *shared_to, int is_org, GError **error) { char *sql; char *real_repo_id = NULL; GList *repo = NULL; GObject *ret = NULL; /* If path is NULL, 'repo_id' represents for the repo we want, * otherwise, 'repo_id' represents for the origin repo, * find virtual repo by path first. 
*/ if (path != NULL) { real_repo_id = seaf_repo_manager_get_virtual_repo_id (mgr, repo_id, path, NULL); if (!real_repo_id) { seaf_warning ("Failed to get virtual repo_id by path %s, origin_repo: %s\n", path, repo_id); return NULL; } } if (!real_repo_id) real_repo_id = g_strdup (repo_id); if (!is_org) sql = "SELECT sh.repo_id, v.repo_id, " "from_email, permission, commit_id, s.size, " "v.origin_repo, v.path, i.name, " "i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, " "i2.name FROM " "SharedRepo sh LEFT JOIN VirtualRepo v ON " "sh.repo_id=v.repo_id " "LEFT JOIN RepoSize s ON sh.repo_id = s.repo_id " "LEFT JOIN RepoInfo i ON sh.repo_id = i.repo_id " "LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id, Branch b " "WHERE to_email=? AND " "sh.repo_id = b.repo_id AND sh.repo_id=? AND " "b.name = 'master' "; else sql = "SELECT sh.repo_id, v.repo_id, " "from_email, permission, commit_id, s.size, " "v.origin_repo, v.path, i.name, " "i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, " "i2.name FROM " "OrgSharedRepo sh LEFT JOIN VirtualRepo v ON " "sh.repo_id=v.repo_id " "LEFT JOIN RepoSize s ON sh.repo_id = s.repo_id " "LEFT JOIN RepoInfo i ON sh.repo_id = i.repo_id " "LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id, Branch b " "WHERE to_email=? AND " "sh.repo_id = b.repo_id AND sh.repo_id=? AND " "b.name = 'master' "; /* The list 'repo' should have only one repo, * use existing api collect_repos() to get it. 
*/ if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, collect_repos, &repo, 2, "string", shared_to, "string", real_repo_id) < 0) { g_free (real_repo_id); g_list_free (repo); seaf_warning ("[share mgr] DB error when get shared repo " "for %s, path:%s\n", shared_to, path); return NULL; } g_free (real_repo_id); if (repo) { ret = (GObject *)(repo->data); g_list_free (repo); } return ret; } int seaf_share_manager_unshare_group_subdir (SeafShareManager* mgr, const char *repo_id, const char *path, const char *owner, int group_id) { if (seaf_db_statement_query (mgr->seaf->db, "DELETE FROM RepoGroup WHERE " "user_name = ? AND group_id = ? " "AND repo_id IN " "(SELECT repo_id FROM VirtualRepo WHERE " "origin_repo = ? AND path = ?)", 4, "string", owner, "int", group_id, "string", repo_id, "string", path) < 0) return -1; return 0; } gboolean seaf_share_manager_repo_has_been_shared (SeafShareManager* mgr, const char *repo_id, gboolean including_groups) { gboolean exists; gboolean db_err = FALSE; char *sql; sql = "SELECT 1 FROM SharedRepo WHERE repo_id=?"; exists = seaf_db_statement_exists (mgr->seaf->db, sql, &db_err, 1, "string", repo_id); if (db_err) { seaf_warning ("DB error when check repo exist in SharedRepo and RepoGroup.\n"); return FALSE; } if (!exists && including_groups) { sql = "SELECT 1 FROM RepoGroup WHERE repo_id=?"; exists = seaf_db_statement_exists (mgr->seaf->db, sql, &db_err, 1, "string", repo_id); } return exists; } gboolean get_shared_users_cb (SeafDBRow *row, void *data) { GList **users = data; const char *repo_id = seaf_db_row_get_column_text (row, 0); const char *user = seaf_db_row_get_column_text (row, 1); const char *perm = seaf_db_row_get_column_text (row, 2); SeafileSharedUser *uobj = g_object_new (SEAFILE_TYPE_SHARED_USER, "repo_id", repo_id, "user", user, "perm", perm, NULL); *users = g_list_append (*users, uobj); return TRUE; } GList * seaf_share_manager_org_get_shared_users_by_repo (SeafShareManager* mgr, int org_id, const char *repo_id) { 
GList *users = NULL; char *sql = "SELECT repo_id, to_email, permission FROM OrgSharedRepo WHERE org_id=? AND " "repo_id=?"; int ret = seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_shared_users_cb, &users, 2, "int", org_id, "string", repo_id); if (ret < 0) { seaf_warning("Failed to get users by repo_id[%s], org_id[%d]\n", repo_id, org_id); return NULL; } return users; } GList * seaf_share_manager_get_shared_users_by_repo(SeafShareManager* mgr, const char *repo_id) { GList *users = NULL; char *sql = "SELECT repo_id, to_email, permission FROM SharedRepo WHERE " "repo_id=?"; int ret = seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_shared_users_cb, &users, 1, "string", repo_id); if (ret < 0) { seaf_warning("Failed to get users by repo_id[%s]\n", repo_id); return NULL; } return users; } ================================================ FILE: server/share-mgr.h ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef SHARE_MGR_H #define SHARE_MGR_H #include struct _SeafileSession; typedef struct _SeafShareManager SeafShareManager; typedef struct _SeafShareManagerPriv SeafShareManagerPriv; typedef struct _ShareRepoInfo ShareRepoInfo; struct _SeafShareManager { struct _SeafileSession *seaf; }; SeafShareManager* seaf_share_manager_new (struct _SeafileSession *seaf); int seaf_share_manager_start (SeafShareManager *mgr); int seaf_share_manager_add_share (SeafShareManager *mgr, const char *repo_id, const char *from_email, const char *to_email, const char *permission); int seaf_share_manager_set_subdir_perm_by_path (SeafShareManager *mgr, const char *repo_id, const char *from_email, const char *to_email, const char *permission, const char *path); int seaf_share_manager_set_permission (SeafShareManager *mgr, const char *repo_id, const char *from_email, const char *to_email, const char *permission); GList* seaf_share_manager_list_share_repos (SeafShareManager *mgr, const char *email, 
const char *type, int start, int limit, gboolean *db_err); GList * seaf_share_manager_list_shared_to (SeafShareManager *mgr, const char *owner, const char *repo_id); GList * seaf_share_manager_list_repo_shared_to (SeafShareManager *mgr, const char *owner, const char *repo_id, GError **error); GList * seaf_share_manager_list_repo_shared_group (SeafShareManager *mgr, const char *from_email, const char *repo_id, GError **error); GHashTable * seaf_share_manager_get_shared_dirs_to_user (SeafShareManager *mgr, const char *orig_repo_id, const char *to_email); GHashTable * seaf_share_manager_get_shared_dirs_to_group (SeafShareManager *mgr, const char *orig_repo_id, GList *groups); int seaf_share_manager_remove_share (SeafShareManager *mgr, const char *repo_id, const char *from_email, const char *to_email); int seaf_share_manager_unshare_subdir (SeafShareManager* mgr, const char *orig_repo_id, const char *path, const char *from_email, const char *to_email); /* Remove all share info of a repo. */ int seaf_share_manager_remove_repo (SeafShareManager *mgr, const char *repo_id); char * seaf_share_manager_check_permission (SeafShareManager *mgr, const char *repo_id, const char *email); GHashTable * seaf_share_manager_get_shared_sub_dirs (SeafShareManager *mgr, const char *repo_id, const char *path); int seaf_share_manager_is_repo_shared (SeafShareManager *mgr, const char *repo_id); GObject * seaf_get_shared_repo_by_path (SeafRepoManager *mgr, const char *repo_id, const char *path, const char *shared_to, int is_org, GError **error); int seaf_share_manager_unshare_group_subdir (SeafShareManager* mgr, const char *repo_id, const char *path, const char *owner, int group_id); gboolean seaf_share_manager_repo_has_been_shared (SeafShareManager* mgr, const char *repo_id, gboolean including_groups); GList * seaf_share_manager_org_get_shared_users_by_repo (SeafShareManager* mgr, int org_id, const char *repo_id); GList * seaf_share_manager_get_shared_users_by_repo (SeafShareManager* mgr, 
const char *repo_id); #endif /* SHARE_MGR_H */ ================================================ FILE: server/size-sched.c ================================================ #include "common.h" #include #include "seafile-session.h" #include "size-sched.h" #include "diff-simple.h" #define DEBUG_FLAG SEAFILE_DEBUG_OTHER #include "log.h" #include "obj-cache.h" #define REPO_SIZE_LIST "repo_size_task" typedef struct SizeSchedulerPriv { pthread_t thread_id; GThreadPool *compute_repo_size_thread_pool; struct ObjCache *cache; } SizeSchedulerPriv; typedef struct RepoSizeJob { SizeScheduler *sched; char repo_id[37]; } RepoSizeJob; typedef struct RepoInfo { gchar *head_id; gint64 size; gint64 file_count; } RepoInfo; static void* compute_repo_size (void *vjob); static void compute_task (void *data, void *user_data); static void* log_unprocessed_task_thread (void *arg); #define DEFAULT_SCHEDULE_THREAD_NUMBER 1; SizeScheduler * size_scheduler_new (SeafileSession *session) { GError *error = NULL; SizeScheduler *sched = g_new0 (SizeScheduler, 1); int sched_thread_num; if (!sched) return NULL; sched->priv = g_new0 (SizeSchedulerPriv, 1); if (!sched->priv) { g_free (sched); return NULL; } sched->priv->cache = session->obj_cache; sched->seaf = session; sched_thread_num = g_key_file_get_integer (session->config, "scheduler", "size_sched_thread_num", NULL); if (sched_thread_num == 0) sched_thread_num = DEFAULT_SCHEDULE_THREAD_NUMBER; sched->priv->compute_repo_size_thread_pool = g_thread_pool_new (compute_task, NULL, sched_thread_num, FALSE, &error); if (!sched->priv->compute_repo_size_thread_pool) { if (error) { seaf_warning ("Failed to create compute repo size thread pool: %s.\n", error->message); } else { seaf_warning ("Failed to create repo size thread pool.\n"); } g_clear_error (&error); g_free (sched->priv); g_free (sched); return NULL; } return sched; } int size_scheduler_start (SizeScheduler *scheduler) { int ret = pthread_create (&scheduler->priv->thread_id, NULL, 
log_unprocessed_task_thread, scheduler); if (ret < 0) { seaf_warning ("Failed to create log unprocessed task thread.\n"); return -1; } pthread_detach (scheduler->priv->thread_id); return 0; } void schedule_repo_size_computation (SizeScheduler *scheduler, const char *repo_id) { RepoSizeJob *job = g_new0(RepoSizeJob, 1); job->sched = scheduler; memcpy (job->repo_id, repo_id, 37); g_thread_pool_push (scheduler->priv->compute_repo_size_thread_pool, job, NULL); } #define PRINT_UNPROCESSED_TASKS_INTERVAL 30 void *log_unprocessed_task_thread (void *arg) { SizeScheduler *sched = arg; guint unprocessed_num; while (1) { unprocessed_num = g_thread_pool_unprocessed (sched->priv->compute_repo_size_thread_pool); if (unprocessed_num > 10) seaf_message ("The number of repo size update tasks in queue is %u\n", unprocessed_num); sleep (PRINT_UNPROCESSED_TASKS_INTERVAL); } return NULL; } static void compute_task (void *data, void *user_data) { RepoSizeJob *job = data; compute_repo_size (job); g_free (job); } static gboolean get_head_id (SeafDBRow *row, void *data) { char *head_id_out = data; const char *head_id; head_id = seaf_db_row_get_column_text (row, 0); memcpy (head_id_out, head_id, 40); return FALSE; } static int set_repo_size_and_file_count (SeafDB *db, const char *repo_id, const char *new_head_id, gint64 size, gint64 file_count) { SeafDBTrans *trans; char *sql; char cached_head_id[41] = {0}; int ret = 0; trans = seaf_db_begin_transaction (db); if (!trans) return -1; sql = "SELECT head_id FROM RepoSize WHERE repo_id=?"; int n = seaf_db_trans_foreach_selected_row (trans, sql, get_head_id, cached_head_id, 1, "string", repo_id); if (n < 0) { ret = -1; goto rollback; } if (n == 0) { /* Size not set before. */ sql = "INSERT INTO RepoSize (repo_id, size, head_id) VALUES (?, ?, ?)"; if (seaf_db_trans_query (trans, sql, 3, "string", repo_id, "int64", size, "string", new_head_id) < 0) { ret = -1; goto rollback; } } else { sql = "UPDATE RepoSize SET size = ?, head_id = ? 
WHERE repo_id = ?"; if (seaf_db_trans_query (trans, sql, 3, "int64", size, "string", new_head_id, "string", repo_id) < 0) { ret = -1; goto rollback; } } gboolean exist; gboolean db_err; exist = seaf_db_trans_check_for_existence (trans, "SELECT 1 FROM RepoFileCount WHERE repo_id=?", &db_err, 1, "string", repo_id); if (db_err) { ret = -1; goto rollback; } if (exist) { if (seaf_db_trans_query (trans, "UPDATE RepoFileCount SET file_count=? WHERE repo_id=?", 2, "int64", file_count, "string", repo_id) < 0) { ret = -1; goto rollback; } } else { if (seaf_db_trans_query (trans, "INSERT INTO RepoFileCount (repo_id,file_count) VALUES (?,?)", 2, "string", repo_id, "int64", file_count) < 0) { ret = -1; goto rollback; } } if (seaf_db_commit (trans) < 0) { ret = -1; goto rollback; } seaf_db_trans_close (trans); return ret; rollback: seaf_db_rollback (trans); seaf_db_trans_close (trans); return ret; } static gboolean create_old_repo_info (SeafDBRow *row, void *data) { RepoInfo **info = data; const char *head_id = seaf_db_row_get_column_text (row, 0); gint64 size = seaf_db_row_get_column_int64 (row, 1); gint64 file_count = seaf_db_row_get_column_int64 (row, 2); if (!head_id) return FALSE; *info = g_new0(RepoInfo, 1); if (!*info) return FALSE; (*info)->head_id = g_strdup(head_id); (*info)->size = size; (*info)->file_count = file_count; return TRUE; } static RepoInfo* get_old_repo_info_from_db (SeafDB *db, const char *repo_id, gboolean *is_db_err) { RepoInfo *info = NULL; char *sql; switch (seaf_db_type (db)) { case SEAF_DB_TYPE_MYSQL: case SEAF_DB_TYPE_PGSQL: sql = "select s.head_id,s.size,f.file_count FROM " "RepoSize s LEFT JOIN RepoFileCount f ON " "s.repo_id=f.repo_id WHERE " "s.repo_id=? 
FOR UPDATE"; break; case SEAF_DB_TYPE_SQLITE: sql = "select s.head_id,s.size,f.file_count FROM " "RepoSize s LEFT JOIN RepoFileCount f ON " "s.repo_id=f.repo_id WHERE " "s.repo_id=?"; break; default: seaf_warning("Unexpected database type.\n"); *is_db_err = TRUE; return NULL; } int ret = seaf_db_statement_foreach_row (db, sql, create_old_repo_info, &info, 1, "string", repo_id); if (ret < 0) *is_db_err = TRUE; return info; } static void notify_repo_size_change (SizeScheduler *sched, const char *repo_id) { ObjCache *cache = sched->priv->cache; if (!cache) { return; } json_t *obj = NULL; char *msg = NULL; obj = json_object (); json_object_set_new (obj, "repo_id", json_string(repo_id)); msg = json_dumps (obj, JSON_COMPACT); objcache_push (cache, REPO_SIZE_LIST, msg); out: g_free (msg); json_decref (obj); } static void* compute_repo_size (void *vjob) { RepoSizeJob *job = vjob; SizeScheduler *sched = job->sched; SeafRepo *repo = NULL; SeafCommit *head = NULL; SeafCommit *old_head = NULL; GObject *file_count_info = NULL; gint64 size = 0; gint64 file_count = 0; int ret; RepoInfo *info = NULL; GError *error = NULL; gboolean is_db_err = FALSE; repo = seaf_repo_manager_get_repo (sched->seaf->repo_mgr, job->repo_id); if (!repo) { seaf_warning ("[scheduler] failed to get repo %s.\n", job->repo_id); return vjob; } info = get_old_repo_info_from_db(sched->seaf->db, job->repo_id, &is_db_err); if (is_db_err) goto out; if (info && g_strcmp0 (info->head_id, repo->head->commit_id) == 0) goto out; head = seaf_commit_manager_get_commit (sched->seaf->commit_mgr, repo->id, repo->version, repo->head->commit_id); if (!head) { seaf_warning ("[scheduler] failed to get head commit %s.\n", repo->head->commit_id); goto out; } if (info) old_head = seaf_commit_manager_get_commit (sched->seaf->commit_mgr, repo->id, repo->version, info->head_id); if (info && (info->file_count != 0) && old_head){ gint64 change_size = 0; gint64 change_file_count = 0; GList *diff_entries = NULL; ret = diff_commits 
(old_head, head, &diff_entries, FALSE); if (ret < 0) { seaf_warning("[scheduler] failed to do diff.\n"); goto out; } GList *des = NULL; for (des = diff_entries; des ; des = des->next){ DiffEntry *diff_entry = des->data; if (diff_entry->status == DIFF_STATUS_DELETED){ change_size -= diff_entry->size; --change_file_count; } else if (diff_entry->status == DIFF_STATUS_ADDED){ change_size += diff_entry->size; ++change_file_count; } else if (diff_entry->status == DIFF_STATUS_MODIFIED) change_size = change_size + diff_entry->size - diff_entry->origin_size; } size = info->size + change_size; file_count = info->file_count + change_file_count; g_list_free_full (diff_entries, (GDestroyNotify)diff_entry_free); } else { file_count_info = seaf_fs_manager_get_file_count_info_by_path (seaf->fs_mgr, repo->store_id, repo->version, repo->root_id, "/", &error); if (!file_count_info) { seaf_warning ("[scheduler] failed to get file count info.\n"); g_clear_error (&error); goto out; } g_object_get (file_count_info, "file_count", &file_count, "size", &size, NULL); g_object_unref (file_count_info); } ret = set_repo_size_and_file_count (sched->seaf->db, job->repo_id, repo->head->commit_id, size, file_count); if (ret < 0) { seaf_warning ("[scheduler] failed to store repo size and file count %s.\n", job->repo_id); goto out; } notify_repo_size_change (sched, repo->store_id); out: seaf_repo_unref (repo); seaf_commit_unref (head); seaf_commit_unref (old_head); if (info) g_free (info->head_id); g_free (info); return vjob; } ================================================ FILE: server/size-sched.h ================================================ #ifndef SIZE_SCHEDULER_H #define SIZE_SCHEDULER_H struct _SeafileSession; struct SizeSchedulerPriv; typedef struct SizeScheduler { struct _SeafileSession *seaf; struct SizeSchedulerPriv *priv; } SizeScheduler; SizeScheduler * size_scheduler_new (struct _SeafileSession *session); int size_scheduler_start (SizeScheduler *scheduler); void 
/* Upload failure categories. send_reply_by_error_code() maps each of
 * these to an HTTP status and message for the client. */
enum UploadError {
    ERROR_FILENAME,      /* invalid file name (SEAF_HTTP_RES_BADFILENAME) */
    ERROR_EXISTS,        /* target file already exists (SEAF_HTTP_RES_EXISTS) */
    ERROR_NOT_EXIST,     /* target file does not exist (SEAF_HTTP_RES_NOT_EXISTS) */
    ERROR_SIZE,          /* upload exceeds size limit (SEAF_HTTP_RES_TOOLARGE) */
    ERROR_QUOTA,         /* user/repo out of quota (SEAF_HTTP_RES_NOQUOTA) */
    ERROR_FORBIDDEN,     /* permission denied (SEAF_HTTP_RES_FORBIDDEN) */
    ERROR_RECV,          /* failure while receiving data (reported as server error) */
    ERROR_BLOCK_MISSING, /* referenced block not on server (SEAF_HTTP_RES_BLOCK_MISSING) */
    ERROR_INTERNAL,      /* any other internal error (EVHTP_RES_SERVERR) */
};
/* IE8 will set filename to the full path of the uploaded file.
 * So we need to strip out the basename from it.
 * Treats both '/' and '\\' as separators; returns a newly allocated
 * copy of the portion after the last separator (or of the whole string
 * when no separator is present). */
static char *
get_basename (const char *path)
{
    const char *p;

    /* Walk backwards until the character just before @p is a separator. */
    for (p = path + strlen (path); p != path; --p) {
        char c = *(p - 1);
        if (c == '/' || c == '\\')
            break;
    }

    return g_strdup (p);
}
/* Translate an UploadError code into the matching HTTP error reply.
 * Unknown codes send nothing (the caller is expected to pass a value
 * from enum UploadError). */
static void
send_reply_by_error_code (evhtp_request_t *req, int error_code)
{
    if (error_code == ERROR_FILENAME) {
        send_error_reply (req, SEAF_HTTP_RES_BADFILENAME, "Invalid filename.\n");
    } else if (error_code == ERROR_EXISTS) {
        send_error_reply (req, SEAF_HTTP_RES_EXISTS, "File already exists.\n");
    } else if (error_code == ERROR_NOT_EXIST) {
        send_error_reply (req, SEAF_HTTP_RES_NOT_EXISTS, "File does not exist.\n");
    } else if (error_code == ERROR_SIZE) {
        send_error_reply (req, SEAF_HTTP_RES_TOOLARGE, "File size is too large.\n");
    } else if (error_code == ERROR_QUOTA) {
        send_error_reply (req, SEAF_HTTP_RES_NOQUOTA, "Out of quota.\n");
    } else if (error_code == ERROR_BLOCK_MISSING) {
        send_error_reply (req, SEAF_HTTP_RES_BLOCK_MISSING, "Block missing.\n");
    } else if (error_code == ERROR_FORBIDDEN) {
        send_error_reply (req, SEAF_HTTP_RES_FORBIDDEN, "Permission denied.");
    } else if (error_code == ERROR_RECV || error_code == ERROR_INTERNAL) {
        send_error_reply (req, EVHTP_RES_SERVERR, "Internal error\n");
    }
}
/* Return a newly allocated canonical form of @path: backslashes become
 * forward slashes and trailing slashes are removed ("/a/b///" -> "/a/b").
 * Note a bare "/" canonicalizes to the empty string. */
static char *
get_canonical_path (const char *path)
{
    char *copy = g_strdup (path);
    size_t n = strlen (copy);
    size_t i;

    /* Normalize Windows-style separators. */
    for (i = 0; i < n; ++i) {
        if (copy[i] == '\\')
            copy[i] = '/';
    }

    /* Remove trailing slashes from dir path. */
    while (n > 0 && copy[n - 1] == '/')
        copy[--n] = 0;

    return copy;
}
*array; GList *ptr; char *file; char *json_data; char *ret; array = json_array (); for (ptr = files; ptr; ptr = ptr->next) { file = ptr->data; json_array_append_new (array, json_string(file)); } json_data = json_dumps (array, 0); json_decref (array); ret = g_strdup (json_data); free (json_data); return ret; } static int create_relative_path (RecvFSM *fsm, char *parent_dir, char *relative_path) { int rc = 0; GError *error = NULL; if (!relative_path) return 0; rc = seaf_repo_manager_mkdir_with_parents (seaf->repo_mgr, fsm->repo_id, parent_dir, relative_path, fsm->user, &error); if (rc < 0) { if (error) { seaf_warning ("[upload folder] %s.", error->message); g_clear_error (&error); } } return rc; } static char * file_id_list_from_json (const char *ret_json) { json_t *array, *obj, *value; json_error_t err; size_t index; GString *id_list; array = json_loadb (ret_json, strlen(ret_json), 0, &err); if (!array) { seaf_warning ("Failed to load ret_json: %s.\n", err.text); return NULL; } id_list = g_string_new (NULL); size_t n = json_array_size (array); for (index = 0; index < n; index++) { obj = json_array_get (array, index); value = json_object_get (obj, "id"); const char *id = json_string_value (value); g_string_append (id_list, id); if (index != n - 1) g_string_append (id_list, "\t"); } json_decref (array); return g_string_free (id_list, FALSE); } static gint64 rfc3339_to_timestamp (const char *last_modify) { if (!last_modify) { return -1; } GDateTime *date_time = g_date_time_new_from_iso8601(last_modify, NULL); if (!date_time) { return -1; } gint64 mtime = g_date_time_to_unix(date_time); g_date_time_unref(date_time); return mtime; } static void upload_api_cb(evhtp_request_t *req, void *arg) { RecvFSM *fsm = arg; char *parent_dir, *replace_str; char *relative_path = NULL, *new_parent_dir = NULL; char *last_modify = NULL; gint64 mtime = 0; GError *error = NULL; int error_code = -1; char *filenames_json, *tmp_files_json; int replace = 0; int rc; evhtp_headers_add_header 
(req->headers_out,
     evhtp_header_new("Access-Control-Allow-Headers",
                      "x-requested-with, content-type, content-range, content-disposition, accept, origin, authorization", 1, 1));
    /* Standard CORS response headers for browser-based uploads. */
    evhtp_headers_add_header (req->headers_out,
                              evhtp_header_new("Access-Control-Allow-Methods",
                                               "GET, POST, PUT, PATCH, DELETE, OPTIONS", 1, 1));
    evhtp_headers_add_header (req->headers_out,
                              evhtp_header_new("Access-Control-Allow-Origin",
                                               "*", 1, 1));
    evhtp_headers_add_header (req->headers_out,
                              evhtp_header_new("Access-Control-Max-Age",
                                               "86400", 1, 1));

    if (evhtp_request_get_method(req) == htp_method_OPTIONS) {
        /* If CORS preflight header, then create an empty body response (200 OK)
         * and return it.
         */
        send_success_reply (req);
        return;
    }

    /* After upload_headers_cb() returns an error, libevhtp may still
     * receive data from the web browser and call into this cb.
     * In this case fsm will be NULL.
     */
    if (!fsm || fsm->state == RECV_ERROR)
        return;

    if (!fsm->filenames) {
        seaf_debug ("[upload] No file uploaded.\n");
        send_error_reply (req, EVHTP_RES_BADREQ, "No file uploaded.\n");
        return;
    }

    /* Optional client-supplied modification time (RFC 3339 string). */
    last_modify = g_hash_table_lookup (fsm->form_kvs, "last_modify");
    if (last_modify) {
        mtime = rfc3339_to_timestamp (last_modify);
    }

    /* "replace" must be exactly 0 or 1 when present. */
    replace_str = g_hash_table_lookup (fsm->form_kvs, "replace");
    if (replace_str) {
        replace = atoi(replace_str);
        if (replace != 0 && replace != 1) {
            seaf_debug ("[Upload] Invalid argument replace: %s.\n", replace_str);
            send_error_reply (req, EVHTP_RES_BADREQ, "Invalid argument replace.\n");
            return;
        }
    }

    parent_dir = g_hash_table_lookup (fsm->form_kvs, "parent_dir");
    if (!parent_dir) {
        seaf_debug ("[upload] No parent dir given.\n");
        send_error_reply (req, EVHTP_RES_BADREQ, "Invalid parent dir.\n");
        return;
    }

    /* An optional relative path (folder upload); it must not be absolute. */
    relative_path = g_hash_table_lookup (fsm->form_kvs, "relative_path");
    if (relative_path != NULL) {
        if (relative_path[0] == '/' || relative_path[0] == '\\') {
            seaf_warning ("Invalid relative path %s.\n", relative_path);
            send_error_reply (req, EVHTP_RES_BADREQ, "Invalid relative path.");
            return;
        }
        char *tmp_p = get_canonical_path(parent_dir);
        char *tmp_r = get_canonical_path(relative_path);
        new_parent_dir = g_build_path("/", tmp_p, tmp_r, NULL);
        g_free(tmp_p);
        g_free(tmp_r);
    } else {
        new_parent_dir = get_canonical_path(parent_dir);
    }

    /* rstart >= 0 means a Content-Range was given: resumable upload.
     * Each chunk is appended to a shared temp file; only the final chunk
     * falls through to the commit logic below. */
    if (fsm->rstart >= 0) {
        if (fsm->filenames->next) {
            seaf_debug ("[upload] Breakpoint transfer only support one file in one request.\n");
            send_error_reply (req, EVHTP_RES_BADREQ, "More files in one request.\n");
            goto out;
        }
        if (parent_dir[0] != '/') {
            seaf_debug ("[upload] Invalid parent dir, should start with /.\n");
            send_error_reply (req, EVHTP_RES_BADREQ, "Invalid parent dir.\n");
            goto out;
        }
        if (!fsm->resumable_tmp_file)
            fsm->resumable_tmp_file = g_build_path ("/", new_parent_dir,
                                                    (char *)fsm->filenames->data, NULL);
        if (write_block_data_to_tmp_file (fsm, new_parent_dir,
                                          (char *)fsm->filenames->data) < 0) {
            error_code = ERROR_INTERNAL;
            goto out;
        }
        if (fsm->rend != fsm->fsize - 1) {
            /* Not the last chunk yet: acknowledge it and wait for more. */
            const char *success_str = "{\"success\": true}";
            evbuffer_add (req->buffer_out, success_str, strlen(success_str));
            send_success_reply_ie8_compatible (req, EVHTP_RES_OK);
            goto out;
        }
    }

    if (!fsm->files) {
        seaf_debug ("[upload] No file uploaded.\n");
        send_error_reply (req, EVHTP_RES_BADREQ, "No file uploaded.\n");
        goto out;
    }

    if (!check_parent_dir (req, fsm->repo_id, parent_dir))
        goto out;

    /* The dir the upload token was issued for must match the request. */
    if (!fsm->parent_dir || !is_parent_matched (fsm->parent_dir, parent_dir)){
        error_code = ERROR_FORBIDDEN;
        goto out;
    }

    if (!check_tmp_file_list (fsm->files, &error_code))
        goto out;

    gint64 content_len;
    if (fsm->fsize > 0)
        content_len = fsm->fsize;
    else
        content_len = get_content_length (req);
    if (seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr,
                                                   fsm->repo_id,
                                                   content_len) != 0) {
        error_code = ERROR_QUOTA;
        goto out;
    }

    rc = create_relative_path (fsm, parent_dir, relative_path);
    if (rc < 0) {
        error_code = ERROR_INTERNAL;
        goto out;
    }

    filenames_json = file_list_to_json (fsm->filenames);
    tmp_files_json = file_list_to_json (fsm->files);

    char *ret_json = NULL;
    char *task_id = NULL;
    rc = seaf_repo_manager_post_multi_files (seaf->repo_mgr,
                                             fsm->repo_id,
                                             new_parent_dir,
                                             filenames_json,
                                             tmp_files_json,
                                             fsm->user,
                                             replace,
                                             mtime,
                                             &ret_json,
                                             fsm->need_idx_progress ? &task_id : NULL,
                                             &error);
    g_free (filenames_json);
    g_free (tmp_files_json);
    if (rc < 0) {
        error_code = ERROR_INTERNAL;
        if (error) {
            if (error->code == POST_FILE_ERR_FILENAME) {
                error_code = ERROR_FILENAME;
            } else if (error->code == SEAF_ERR_FILES_WITH_SAME_NAME) {
                /* Reply directly; -1 suppresses send_reply_by_error_code. */
                error_code = -1;
                send_error_reply (req, EVHTP_RES_BADREQ, "Too many files with same name.\n");
            } else if (error->code == SEAF_ERR_GC_CONFLICT) {
                error_code = -1;
                send_error_reply (req, EVHTP_RES_CONFLICT, "GC Conflict.\n");
            }
            g_clear_error (&error);
        }
        goto out;
    }

    if (task_id) {
        /* Async indexing requested: return the task id instead of file ids. */
        evbuffer_add (req->buffer_out, task_id, strlen(task_id));
        g_free (task_id);
    } else {
        const char *use_json = evhtp_kv_find (req->uri->query, "ret-json");
        if (use_json) {
            evbuffer_add (req->buffer_out, ret_json, strlen(ret_json));
        } else {
            char *new_ids = file_id_list_from_json (ret_json);
            if (new_ids)
                evbuffer_add (req->buffer_out, new_ids, strlen(new_ids));
            g_free (new_ids);
        }
    }
    g_free (ret_json);

    send_success_reply (req);

    char *oper = "web-file-upload";
    if (g_strcmp0(fsm->token_type, "upload-link") == 0)
        oper = "link-file-upload";
    send_statistic_msg(fsm->repo_id, fsm->user, oper, (guint64)content_len);

out:
    g_free(new_parent_dir);
    send_reply_by_error_code (req, error_code);
    return;
}

static void
upload_raw_blks_api_cb(evhtp_request_t *req, void *arg)
{
    RecvFSM *fsm = arg;
    GError *error = NULL;
    int error_code = -1;
    char *blockids_json, *tmp_files_json;

    /* After upload_headers_cb() returns an error, libevhtp may still
     * receive data from the web browser and call into this cb.
     * In this case fsm will be NULL.
*/ if (!fsm || fsm->state == RECV_ERROR) return; if (!check_tmp_file_list (fsm->files, &error_code)) goto out; blockids_json = file_list_to_json (fsm->filenames); tmp_files_json = file_list_to_json (fsm->files); int rc = seaf_repo_manager_post_blocks (seaf->repo_mgr, fsm->repo_id, blockids_json, tmp_files_json, fsm->user, &error); g_free (blockids_json); g_free (tmp_files_json); if (rc < 0) { error_code = ERROR_INTERNAL; if (error) { if (error->code == POST_FILE_ERR_FILENAME) { error_code = ERROR_FILENAME; } g_clear_error (&error); } goto out; } guint64 content_len = (guint64)get_content_length(req); send_statistic_msg(fsm->repo_id, fsm->user, "web-file-upload", content_len); evbuffer_add (req->buffer_out, "\"OK\"", 4); send_success_reply (req); out: send_reply_by_error_code (req, error_code); return; } static void upload_blks_api_cb(evhtp_request_t *req, void *arg) { RecvFSM *fsm = arg; const char *parent_dir, *file_name, *size_str, *replace_str, *commitonly_str; char *last_modify = NULL; gint64 mtime = 0; GError *error = NULL; int error_code = -1; char *blockids_json; gint64 file_size = -1; int replace = 0; /* After upload_headers_cb() returns an error, libevhtp may still * receive data from the web browser and call into this cb. * In this case fsm will be NULL. 
*/ if (!fsm || fsm->state == RECV_ERROR) return; replace_str = g_hash_table_lookup (fsm->form_kvs, "replace"); if (replace_str) { replace = atoi(replace_str); if (replace != 0 && replace != 1) { seaf_debug ("[Upload-blks] Invalid argument replace: %s.\n", replace_str); send_error_reply (req, EVHTP_RES_BADREQ, "Invalid argument replace.\n"); return; } } parent_dir = g_hash_table_lookup (fsm->form_kvs, "parent_dir"); file_name = g_hash_table_lookup (fsm->form_kvs, "file_name"); size_str = g_hash_table_lookup (fsm->form_kvs, "file_size"); if (size_str) file_size = atoll(size_str); commitonly_str = evhtp_kv_find (req->uri->query, "commitonly"); last_modify = g_hash_table_lookup (fsm->form_kvs, "last_modify"); if (last_modify) { mtime = rfc3339_to_timestamp (last_modify); } if (!file_name || !parent_dir || !size_str || file_size < 0) { seaf_debug ("[upload-blks] No parent dir or file name given.\n"); send_error_reply (req, EVHTP_RES_BADREQ, "No parent dir or file name.\n"); return; } if (!commitonly_str) { send_error_reply (req, EVHTP_RES_BADREQ, "Only commit suppported.\n"); return; } if (!check_parent_dir (req, fsm->repo_id, parent_dir)) return; char *new_file_id = NULL; int rc = 0; /* if (!commitonly_str) { */ /* gint64 content_len = get_content_length (req); */ /* if (seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr, */ /* fsm->repo_id, */ /* content_len) != 0) { */ /* error_code = ERROR_QUOTA; */ /* goto error; */ /* } */ /* if (!check_tmp_file_list (fsm->files, &error_code)) */ /* goto error; */ /* blockids_json = file_list_to_json (fsm->filenames); */ /* tmp_files_json = file_list_to_json (fsm->files); */ /* rc = seaf_repo_manager_post_file_blocks (seaf->repo_mgr, */ /* fsm->repo_id, */ /* parent_dir, */ /* file_name, */ /* blockids_json, */ /* tmp_files_json, */ /* fsm->user, */ /* file_size, */ /* replace, */ /* &new_file_id, */ /* &error); */ /* g_free (blockids_json); */ /* g_free (tmp_files_json); */ /* } else { */ blockids_json = 
g_hash_table_lookup (fsm->form_kvs, "blockids"); if (blockids_json == NULL) { seaf_debug ("[upload-blks] No blockids given.\n"); send_error_reply (req, EVHTP_RES_BADREQ, "No blockids.\n"); return; } rc = seaf_repo_manager_commit_file_blocks (seaf->repo_mgr, fsm->repo_id, parent_dir, file_name, blockids_json, fsm->user, file_size, replace, mtime, &new_file_id, &error); if (rc < 0) { error_code = ERROR_INTERNAL; if (error) { if (error->code == POST_FILE_ERR_FILENAME) { error_code = ERROR_FILENAME; } else if (error->code == POST_FILE_ERR_BLOCK_MISSING) { error_code = ERROR_BLOCK_MISSING; } else if (error->code == POST_FILE_ERR_QUOTA_FULL) { error_code = ERROR_QUOTA; } else if (error->code == SEAF_ERR_GC_CONFLICT) { error_code = -1; send_error_reply (req, EVHTP_RES_CONFLICT, "GC Conflict.\n"); } g_clear_error (&error); } goto out; } const char *use_json = evhtp_kv_find (req->uri->query, "ret-json"); if (use_json) { json_t *json = json_object (); json_object_set_string_member(json, "id", new_file_id); char *json_data = json_dumps (json, 0); evbuffer_add (req->buffer_out, json_data, strlen(json_data)); json_decref (json); free (json_data); } else { evbuffer_add (req->buffer_out, "\"", 1); evbuffer_add (req->buffer_out, new_file_id, strlen(new_file_id)); evbuffer_add (req->buffer_out, "\"", 1); } send_success_reply (req); out: g_free (new_file_id); send_reply_by_error_code (req, error_code); return; } /* static void */ /* upload_blks_ajax_cb(evhtp_request_t *req, void *arg) */ /* { */ /* RecvFSM *fsm = arg; */ /* char *parent_dir, *file_name, *size_str; */ /* GError *error = NULL; */ /* int error_code = ERROR_INTERNAL; */ /* char *blockids_json, *tmp_files_json; */ /* gint64 file_size = -1; */ /* evhtp_headers_add_header (req->headers_out, */ /* evhtp_header_new("Access-Control-Allow-Headers", */ /* "x-requested-with, content-type, accept, origin, authorization", 1, 1)); */ /* evhtp_headers_add_header (req->headers_out, */ /* 
evhtp_header_new("Access-Control-Allow-Methods", */ /* "GET, POST, PUT, PATCH, DELETE, OPTIONS", 1, 1)); */ /* evhtp_headers_add_header (req->headers_out, */ /* evhtp_header_new("Access-Control-Allow-Origin", */ /* "*", 1, 1)); */ /* evhtp_headers_add_header (req->headers_out, */ /* evhtp_header_new("Access-Control-Max-Age", */ /* "86400", 1, 1)); */ /* if (evhtp_request_get_method(req) == htp_method_OPTIONS) { */ /* /\* If CORS preflight header, then create an empty body response (200 OK) */ /* * and return it. */ /* *\/ */ /* send_success_reply (req); */ /* return; */ /* } */ /* /\* After upload_headers_cb() returns an error, libevhtp may still */ /* * receive data from the web browser and call into this cb. */ /* * In this case fsm will be NULL. */ /* *\/ */ /* if (!fsm || fsm->state == RECV_ERROR) */ /* return; */ /* parent_dir = g_hash_table_lookup (fsm->form_kvs, "parent_dir"); */ /* file_name = g_hash_table_lookup (fsm->form_kvs, "file_name"); */ /* size_str = g_hash_table_lookup (fsm->form_kvs, "file_size"); */ /* if (size_str) */ /* file_size = atoll(size_str); */ /* if (!file_name || !parent_dir || !size_str || file_size < 0) { */ /* seaf_debug ("[upload-blks] No parent dir or file name given.\n"); */ /* send_error_reply (req, EVHTP_RES_BADREQ, "Invalid URL.\n"); */ /* return; */ /* } */ /* if (!check_parent_dir (req, fsm->repo_id, parent_dir)) */ /* return; */ /* if (!check_tmp_file_list (fsm->files, &error_code)) */ /* goto error; */ /* gint64 content_len = get_content_length (req); */ /* if (seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr, */ /* fsm->repo_id, */ /* content_len) != 0) { */ /* error_code = ERROR_QUOTA; */ /* goto error; */ /* } */ /* blockids_json = file_list_to_json (fsm->filenames); */ /* tmp_files_json = file_list_to_json (fsm->files); */ /* int rc = seaf_repo_manager_post_file_blocks (seaf->repo_mgr, */ /* fsm->repo_id, */ /* parent_dir, */ /* file_name, */ /* blockids_json, */ /* tmp_files_json, */ /* fsm->user, */ /* 
file_size, */ /* 0, */ /* NULL, */ /* &error); */ /* g_free (blockids_json); */ /* g_free (tmp_files_json); */ /* if (rc < 0) { */ /* if (error) { */ /* if (error->code == POST_FILE_ERR_FILENAME) { */ /* error_code = ERROR_FILENAME; */ /* } */ /* g_clear_error (&error); */ /* } */ /* goto error; */ /* } */ /* send_success_reply (req); */ /* return; */ /* error: */ /* switch (error_code) { */ /* case ERROR_FILENAME: */ /* send_error_reply (req, SEAF_HTTP_RES_BADFILENAME, "Invalid filename."); */ /* break; */ /* case ERROR_EXISTS: */ /* send_error_reply (req, SEAF_HTTP_RES_EXISTS, "File already exists."); */ /* break; */ /* case ERROR_SIZE: */ /* send_error_reply (req, SEAF_HTTP_RES_TOOLARGE, "File size is too large."); */ /* break; */ /* case ERROR_QUOTA: */ /* send_error_reply (req, SEAF_HTTP_RES_NOQUOTA, "Out of quota."); */ /* break; */ /* case ERROR_RECV: */ /* case ERROR_INTERNAL: */ /* send_error_reply (req, EVHTP_RES_SERVERR, "Internal error.\n"); */ /* break; */ /* } */ /* } */ static int copy_block_to_tmp_file (int blk_fd, int tmp_fd, gint64 offset) { if (lseek(blk_fd, 0, SEEK_SET) < 0) { seaf_warning ("Failed to rewind block temp file position to start: %s\n", strerror(errno)); return -1; } if (lseek(tmp_fd, offset, SEEK_SET) <0) { seaf_warning ("Failed to rewind web upload temp file write position: %s\n", strerror(errno)); return -1; } char buf[8192]; int buf_len = sizeof(buf); ssize_t len; while (TRUE) { len = readn (blk_fd, buf, buf_len); if (len < 0) { seaf_warning ("Failed to read content from block temp file: %s.\n", strerror(errno)); return -1; } else if (len == 0) { return 0; } if (writen (tmp_fd, buf, len) != len) { seaf_warning ("Failed to write content to temp file: %s.\n", strerror(errno)); return -1; } } } static int write_block_data_to_tmp_file (RecvFSM *fsm, const char *parent_dir, const char *file_name) { char *abs_path; char *temp_file = NULL; GError *error = NULL; int tmp_fd = -1; int ret = 0; HttpServerStruct *htp_server = 
seaf->http_server; int cluster_shared_temp_file_mode = htp_server->cluster_shared_temp_file_mode; abs_path = g_build_path ("/", parent_dir, file_name, NULL); temp_file = seaf_repo_manager_get_upload_tmp_file (seaf->repo_mgr, fsm->repo_id, abs_path, &error); if (error) { seaf_warning ("%s\n", error->message); g_clear_error (&error); ret = -1; goto out; } if (!temp_file) { temp_file = g_strdup_printf ("%s/cluster-shared/%sXXXXXX", seaf->http_server->http_temp_dir, file_name); tmp_fd = g_mkstemp_full (temp_file, O_RDWR, cluster_shared_temp_file_mode); if (tmp_fd < 0) { seaf_warning ("Failed to create upload temp file: %s.\n", strerror(errno)); ret = -1; goto out; } if (seaf_repo_manager_add_upload_tmp_file (seaf->repo_mgr, fsm->repo_id, abs_path, temp_file, &error) < 0) { seaf_warning ("%s\n", error->message); g_clear_error (&error); close (tmp_fd); g_unlink (temp_file); tmp_fd = -1; ret = -1; goto out; } } else { tmp_fd = g_open (temp_file, O_WRONLY); if (tmp_fd < 0) { seaf_warning ("Failed to open upload temp file: %s.\n", strerror(errno)); if (errno == ENOENT) { seaf_message ("Upload temp file %s doesn't exist, remove record from db.\n", temp_file); seaf_repo_manager_del_upload_tmp_file (seaf->repo_mgr, fsm->repo_id, abs_path, &error); } ret = -1; goto out; } } if (copy_block_to_tmp_file (fsm->fd, tmp_fd, fsm->rstart) < 0) { ret = -1; goto out; } if (fsm->rend == fsm->fsize - 1) { // For the last block, record tmp_files for upload to seafile and remove fsm->files = g_list_prepend (fsm->files, g_strdup(temp_file)); // for virus checking, indexing... } out: g_free (abs_path); if (tmp_fd >= 0) { close (tmp_fd); } g_free (temp_file); close (fsm->fd); g_unlink (fsm->tmp_file); g_free (fsm->tmp_file); fsm->tmp_file = NULL; return ret; } /* Handle AJAX file upload. @return an array of json data, e.g. 
[{"name": "foo.txt"}] */
static void
upload_ajax_cb(evhtp_request_t *req, void *arg)
{
    RecvFSM *fsm = arg;
    char *parent_dir = NULL, *relative_path = NULL, *new_parent_dir = NULL;
    char *last_modify = NULL;
    gint64 mtime = 0;
    GError *error = NULL;
    int error_code = -1;
    char *filenames_json, *tmp_files_json;
    int rc;

    /* Standard CORS response headers for browser-based uploads. */
    evhtp_headers_add_header (req->headers_out,
                              evhtp_header_new("Access-Control-Allow-Headers",
                                               "x-requested-with, content-type, content-range, content-disposition, accept, origin, authorization", 1, 1));
    evhtp_headers_add_header (req->headers_out,
                              evhtp_header_new("Access-Control-Allow-Methods",
                                               "GET, POST, PUT, PATCH, DELETE, OPTIONS", 1, 1));
    evhtp_headers_add_header (req->headers_out,
                              evhtp_header_new("Access-Control-Allow-Origin",
                                               "*", 1, 1));
    evhtp_headers_add_header (req->headers_out,
                              evhtp_header_new("Access-Control-Max-Age",
                                               "86400", 1, 1));

    if (evhtp_request_get_method(req) == htp_method_OPTIONS) {
        /* If CORS preflight header, then create an empty body response (200 OK)
         * and return it.
         */
        send_success_reply (req);
        return;
    }

    /* After upload_headers_cb() returns an error, libevhtp may still
     * receive data from the web browser and call into this cb.
     * In this case fsm will be NULL.
     */
    if (!fsm || fsm->state == RECV_ERROR)
        return;

    parent_dir = g_hash_table_lookup (fsm->form_kvs, "parent_dir");
    if (!parent_dir) {
        seaf_debug ("[upload] No parent dir given.\n");
        send_error_reply (req, EVHTP_RES_BADREQ, "Invalid parent dir.");
        return;
    }

    /* Optional client-supplied modification time (RFC 3339 string). */
    last_modify = g_hash_table_lookup (fsm->form_kvs, "last_modify");
    if (last_modify) {
        mtime = rfc3339_to_timestamp (last_modify);
    }

    if (!fsm->filenames) {
        seaf_debug ("[upload] No file uploaded.\n");
        send_error_reply (req, EVHTP_RES_BADREQ, "No file uploaded.\n");
        return;
    }

    /* An optional relative path (folder upload); it must not be absolute. */
    relative_path = g_hash_table_lookup (fsm->form_kvs, "relative_path");
    if (relative_path != NULL) {
        if (relative_path[0] == '/' || relative_path[0] == '\\') {
            seaf_warning ("Invalid relative path %s.\n", relative_path);
            send_error_reply (req, EVHTP_RES_BADREQ, "Invalid relative path.");
            return;
        }
        char *tmp_p = get_canonical_path(parent_dir);
        char *tmp_r = get_canonical_path(relative_path);
        new_parent_dir = g_build_path("/", tmp_p, tmp_r, NULL);
        g_free(tmp_p);
        g_free(tmp_r);
    } else {
        new_parent_dir = get_canonical_path(parent_dir);
    }

    /* rstart >= 0 means a Content-Range was given: resumable upload.
     * Each chunk is appended to a shared temp file; only the final chunk
     * falls through to the commit logic below. */
    if (fsm->rstart >= 0) {
        if (fsm->filenames->next) {
            seaf_debug ("[upload] Breakpoint transfer only support one file in one request.\n");
            send_error_reply (req, EVHTP_RES_BADREQ, "More files in one request.\n");
            goto out;
        }
        if (parent_dir[0] != '/') {
            seaf_debug ("[upload] Invalid parent dir, should start with /.\n");
            send_error_reply (req, EVHTP_RES_BADREQ, "Invalid parent dir.\n");
            goto out;
        }
        if (!fsm->resumable_tmp_file)
            fsm->resumable_tmp_file = g_build_path ("/", new_parent_dir,
                                                    (char *)fsm->filenames->data, NULL);
        if (write_block_data_to_tmp_file (fsm, new_parent_dir,
                                          (char *)fsm->filenames->data) < 0) {
            error_code = ERROR_INTERNAL;
            goto out;
        }
        if (fsm->rend != fsm->fsize - 1) {
            /* Not the last chunk yet: acknowledge it and wait for more. */
            const char *success_str = "{\"success\": true}";
            evbuffer_add (req->buffer_out, success_str, strlen(success_str));
            send_success_reply_ie8_compatible (req, EVHTP_RES_OK);
            goto out;
        }
    }

    if (!fsm->files) {
        seaf_debug ("[upload] No file uploaded.\n");
        send_error_reply (req, EVHTP_RES_BADREQ, "No file uploaded.\n");
        goto out;
    }

    if (!check_parent_dir (req, fsm->repo_id, parent_dir))
        goto out;

    /* The dir the upload token was issued for must match the request. */
    if (!fsm->parent_dir || !is_parent_matched (fsm->parent_dir, parent_dir)){
        error_code = ERROR_FORBIDDEN;
        goto out;
    }

    if (!check_tmp_file_list (fsm->files, &error_code))
        goto out;

    gint64 content_len;
    if (fsm->fsize > 0)
        content_len = fsm->fsize;
    else
        content_len = get_content_length (req);
    if (seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr,
                                                   fsm->repo_id,
                                                   content_len) != 0) {
        error_code = ERROR_QUOTA;
        goto out;
    }

    rc = create_relative_path (fsm, parent_dir, relative_path);
    if (rc < 0) {
        error_code = ERROR_INTERNAL;
        goto out;
    }

    filenames_json = file_list_to_json (fsm->filenames);
    tmp_files_json = file_list_to_json (fsm->files);

    char *ret_json = NULL;
    char *task_id = NULL;
    rc = seaf_repo_manager_post_multi_files (seaf->repo_mgr,
                                             fsm->repo_id,
                                             new_parent_dir,
                                             filenames_json,
                                             tmp_files_json,
                                             fsm->user,
                                             0,
                                             mtime,
                                             &ret_json,
                                             fsm->need_idx_progress ? &task_id : NULL,
                                             &error);
    g_free (filenames_json);
    g_free (tmp_files_json);
    if (rc < 0) {
        error_code = ERROR_INTERNAL;
        if (error) {
            if (error->code == POST_FILE_ERR_FILENAME) {
                error_code = ERROR_FILENAME;
            } else if (error->code == SEAF_ERR_FILES_WITH_SAME_NAME) {
                /* Reply directly; -1 suppresses send_reply_by_error_code. */
                error_code = -1;
                send_error_reply (req, EVHTP_RES_BADREQ, "Too many files with same name.\n");
            } else if (error->code == SEAF_ERR_GC_CONFLICT) {
                error_code = -1;
                send_error_reply (req, EVHTP_RES_CONFLICT, "GC Conflict.\n");
            }
            g_clear_error (&error);
        }
        goto out;
    }

    if (task_id) {
        /* Async indexing requested: return the task id instead of file ids. */
        evbuffer_add (req->buffer_out, task_id, strlen(task_id));
        g_free (task_id);
    } else {
        evbuffer_add (req->buffer_out, ret_json, strlen(ret_json));
    }
    g_free (ret_json);

    send_success_reply_ie8_compatible (req, EVHTP_RES_OK);

    char *oper = "web-file-upload";
    if (g_strcmp0(fsm->token_type, "upload-link") == 0)
        oper = "link-file-upload";
    send_statistic_msg(fsm->repo_id, fsm->user, oper, (guint64)content_len);

out:
    g_free (new_parent_dir);
    send_reply_by_error_code (req, error_code);
    return;
}

static void
update_api_cb(evhtp_request_t *req, void *arg)
{
    RecvFSM *fsm = arg;
    char *target_file, *parent_dir = NULL, *filename = NULL;
    char *last_modify = NULL;
    gint64 mtime = 0;
    const char *head_id = NULL;
    GError *error = NULL;
    int error_code = -1;
    char *new_file_id = NULL;

    /* Standard CORS response headers for browser-based uploads. */
    evhtp_headers_add_header (req->headers_out,
                              evhtp_header_new("Access-Control-Allow-Headers",
                                               "x-requested-with, content-type, content-range, content-disposition, accept, origin, authorization", 1, 1));
    evhtp_headers_add_header (req->headers_out,
                              evhtp_header_new("Access-Control-Allow-Methods",
                                               "GET, POST, PUT, PATCH, DELETE, OPTIONS", 1, 1));
    evhtp_headers_add_header (req->headers_out,
                              evhtp_header_new("Access-Control-Allow-Origin",
                                               "*", 1, 1));
    evhtp_headers_add_header (req->headers_out,
                              evhtp_header_new("Access-Control-Max-Age",
                                               "86400", 1, 1));

    if (evhtp_request_get_method(req) == htp_method_OPTIONS) {
        /* If CORS preflight header, then create an empty body response (200 OK)
         *
and return it. */ send_success_reply (req); return; } if (!fsm || fsm->state == RECV_ERROR) return; if (!fsm->filenames) { seaf_debug ("[Update] No file uploaded.\n"); send_error_reply (req, EVHTP_RES_BADREQ, "No file uploaded.\n"); return; } target_file = g_hash_table_lookup (fsm->form_kvs, "target_file"); if (!target_file) { seaf_debug ("[Update] No target file given.\n"); send_error_reply (req, EVHTP_RES_BADREQ, "No target file.\n"); return; } last_modify = g_hash_table_lookup (fsm->form_kvs, "last_modify"); if (last_modify) { mtime = rfc3339_to_timestamp (last_modify); } parent_dir = g_path_get_dirname (target_file); filename = g_path_get_basename (target_file); if (!filename || filename[0] == '\0') { seaf_debug ("[Update] Bad target_file.\n"); send_error_reply (req, EVHTP_RES_BADREQ, "Invalid targe_file.\n"); goto out; } if (fsm->rstart >= 0) { if (fsm->filenames->next) { seaf_debug ("[Update] Breakpoint transfer only support one file in one request.\n"); send_error_reply (req, EVHTP_RES_BADREQ, "More than one file in one request.\n"); goto out; } if (parent_dir[0] != '/') { seaf_debug ("[Update] Invalid parent dir, should start with /.\n"); send_error_reply (req, EVHTP_RES_BADREQ, "Invalid parent dir.\n"); goto out; } if (!fsm->resumable_tmp_file) fsm->resumable_tmp_file = g_build_path ("/", parent_dir, filename, NULL); if (write_block_data_to_tmp_file (fsm, parent_dir, filename) < 0) { send_error_reply (req, EVHTP_RES_SERVERR, "Internal error.\n"); goto out; } if (fsm->rend != fsm->fsize - 1) { const char *success_str = "{\"success\": true}"; evbuffer_add (req->buffer_out, success_str, strlen(success_str)); send_success_reply_ie8_compatible (req, EVHTP_RES_OK); goto out; } } if (!fsm->files) { seaf_debug ("[Update] No file uploaded.\n"); send_error_reply (req, EVHTP_RES_BADREQ, "No file uploaded.\n"); goto out; } if (!check_parent_dir (req, fsm->repo_id, parent_dir)) goto out; if (!check_tmp_file_list (fsm->files, &error_code)) goto out; head_id = 
evhtp_kv_find (req->uri->query, "head"); gint64 content_len; if (fsm->fsize > 0) content_len = fsm->fsize; else content_len = get_content_length (req); if (seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr, fsm->repo_id, content_len) != 0) { error_code = ERROR_QUOTA; goto out; } int rc = seaf_repo_manager_put_file (seaf->repo_mgr, fsm->repo_id, (char *)(fsm->files->data), parent_dir, filename, fsm->user, head_id, mtime, &new_file_id, &error); if (rc < 0) { error_code = ERROR_INTERNAL; if (error) { if (g_strcmp0 (error->message, "file does not exist") == 0) { error_code = ERROR_NOT_EXIST; } g_clear_error (&error); } goto out; } /* Send back the new file id, so that the mobile client can update local cache */ evbuffer_add(req->buffer_out, new_file_id, strlen(new_file_id)); send_success_reply (req); out: if (fsm->rstart >= 0 && fsm->rend == fsm->fsize - 1) { // File upload success, try to remove tmp file from WebUploadTmpFile table char *abs_path; abs_path = g_build_path ("/", parent_dir, filename, NULL); seaf_repo_manager_del_upload_tmp_file (seaf->repo_mgr, fsm->repo_id, abs_path, NULL); g_free (abs_path); } g_free (parent_dir); g_free (filename); g_free (new_file_id); send_reply_by_error_code (req, error_code); return; } static void update_blks_api_cb(evhtp_request_t *req, void *arg) { RecvFSM *fsm = arg; char *target_file, *parent_dir = NULL, *filename = NULL, *size_str = NULL; char *last_modify = NULL; gint64 mtime = 0; const char *commitonly_str; GError *error = NULL; int error_code = -1; char *new_file_id = NULL; char *blockids_json; gint64 file_size = -1; if (!fsm || fsm->state == RECV_ERROR) return; target_file = g_hash_table_lookup (fsm->form_kvs, "target_file"); size_str = g_hash_table_lookup (fsm->form_kvs, "file_size"); if (size_str) file_size = atoll(size_str); if (!target_file || !size_str || file_size < 0) { seaf_debug ("[Update-blks] No target file given.\n"); send_error_reply (req, EVHTP_RES_BADREQ, "No target file.\n"); return; } 
commitonly_str = evhtp_kv_find (req->uri->query, "commitonly"); if (!commitonly_str) { send_error_reply (req, EVHTP_RES_BADREQ, "Only commit supported.\n"); return; } last_modify = g_hash_table_lookup (fsm->form_kvs, "last_modify"); if (last_modify) { mtime = rfc3339_to_timestamp (last_modify); } parent_dir = g_path_get_dirname (target_file); filename = g_path_get_basename (target_file); if (!check_parent_dir (req, fsm->repo_id, parent_dir)) goto out; int rc = 0; /* if (!commitonly_str) { */ /* gint64 content_len = get_content_length(req); */ /* if (seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr, */ /* fsm->repo_id, */ /* content_len) != 0) { */ /* error_code = ERROR_QUOTA; */ /* goto error; */ /* } */ /* if (!check_tmp_file_list (fsm->files, &error_code)) */ /* goto error; */ /* blockids_json = file_list_to_json (fsm->filenames); */ /* tmp_files_json = file_list_to_json (fsm->files); */ /* rc = seaf_repo_manager_put_file_blocks (seaf->repo_mgr, */ /* fsm->repo_id, */ /* parent_dir, */ /* filename, */ /* blockids_json, */ /* tmp_files_json, */ /* fsm->user, */ /* head_id, */ /* file_size, */ /* &new_file_id, */ /* &error); */ /* g_free (blockids_json); */ /* g_free (tmp_files_json); */ /* } else { */ blockids_json = g_hash_table_lookup (fsm->form_kvs, "blockids"); if (blockids_json == NULL) { seaf_debug ("[upload-blks] No blockids given.\n"); send_error_reply (req, EVHTP_RES_BADREQ, "No blockids.\n"); goto out; } rc = seaf_repo_manager_commit_file_blocks (seaf->repo_mgr, fsm->repo_id, parent_dir, filename, blockids_json, fsm->user, file_size, 1, mtime, &new_file_id, &error); if (rc < 0) { error_code = ERROR_INTERNAL; if (error) { if (g_strcmp0 (error->message, "file does not exist") == 0) { error_code = ERROR_NOT_EXIST; } else if (error->code == POST_FILE_ERR_QUOTA_FULL) { error_code = ERROR_QUOTA; } else if (error->code == SEAF_ERR_GC_CONFLICT) { error_code = -1; send_error_reply (req, EVHTP_RES_CONFLICT, "GC Conflict.\n"); } g_clear_error (&error); } 
goto out; } /* Send back the new file id, so that the mobile client can update local cache */ evbuffer_add(req->buffer_out, new_file_id, strlen(new_file_id)); send_success_reply (req); out: g_free (parent_dir); g_free (filename); g_free (new_file_id); send_reply_by_error_code (req, error_code); return; } /* static void */ /* update_blks_ajax_cb(evhtp_request_t *req, void *arg) */ /* { */ /* RecvFSM *fsm = arg; */ /* char *target_file, *parent_dir = NULL, *filename = NULL, *size_str = NULL; */ /* const char *head_id = NULL; */ /* GError *error = NULL; */ /* int error_code = ERROR_INTERNAL; */ /* char *blockids_json, *tmp_files_json; */ /* gint64 file_size = -1; */ /* evhtp_headers_add_header (req->headers_out, */ /* evhtp_header_new("Access-Control-Allow-Headers", */ /* "x-requested-with, content-type, accept, origin, authorization", 1, 1)); */ /* evhtp_headers_add_header (req->headers_out, */ /* evhtp_header_new("Access-Control-Allow-Methods", */ /* "GET, POST, PUT, PATCH, DELETE, OPTIONS", 1, 1)); */ /* evhtp_headers_add_header (req->headers_out, */ /* evhtp_header_new("Access-Control-Allow-Origin", */ /* "*", 1, 1)); */ /* evhtp_headers_add_header (req->headers_out, */ /* evhtp_header_new("Access-Control-Max-Age", */ /* "86400", 1, 1)); */ /* if (evhtp_request_get_method(req) == htp_method_OPTIONS) { */ /* /\* If CORS preflight header, then create an empty body response (200 OK) */ /* * and return it. 
*/ /* *\/ */ /* send_success_reply (req); */ /* return; */ /* } */ /* if (!fsm || fsm->state == RECV_ERROR) */ /* return; */ /* target_file = g_hash_table_lookup (fsm->form_kvs, "target_file"); */ /* size_str = g_hash_table_lookup (fsm->form_kvs, "file_size"); */ /* if (size_str) file_size = atoll(size_str); */ /* if (!target_file || !size_str || file_size < 0) { */ /* seaf_debug ("[Update-blks] No target file given.\n"); */ /* send_error_reply (req, EVHTP_RES_BADREQ, "Invalid URL.\n"); */ /* return; */ /* } */ /* parent_dir = g_path_get_dirname (target_file); */ /* filename = g_path_get_basename (target_file); */ /* if (!check_parent_dir (req, fsm->repo_id, parent_dir)) */ /* return; */ /* if (!check_tmp_file_list (fsm->files, &error_code)) */ /* goto error; */ /* head_id = evhtp_kv_find (req->uri->query, "head"); */ /* gint64 content_len = get_content_length (req); */ /* if (seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr, */ /* fsm->repo_id, */ /* content_len) != 0) { */ /* error_code = ERROR_QUOTA; */ /* goto error; */ /* } */ /* blockids_json = file_list_to_json (fsm->filenames); */ /* tmp_files_json = file_list_to_json (fsm->files); */ /* int rc = seaf_repo_manager_put_file_blocks (seaf->repo_mgr, */ /* fsm->repo_id, */ /* parent_dir, */ /* filename, */ /* blockids_json, */ /* tmp_files_json, */ /* fsm->user, */ /* head_id, */ /* file_size, */ /* NULL, */ /* &error); */ /* g_free (blockids_json); */ /* g_free (tmp_files_json); */ /* g_free (parent_dir); */ /* g_free (filename); */ /* if (rc < 0) { */ /* if (error) { */ /* if (g_strcmp0 (error->message, "file does not exist") == 0) { */ /* error_code = ERROR_NOT_EXIST; */ /* } */ /* g_clear_error (&error); */ /* } */ /* goto error; */ /* } */ /* send_success_reply (req); */ /* return; */ /* error: */ /* switch (error_code) { */ /* case ERROR_FILENAME: */ /* send_error_reply (req, SEAF_HTTP_RES_BADFILENAME, "Invalid filename.\n"); */ /* break; */ /* case ERROR_EXISTS: */ /* send_error_reply (req, 
SEAF_HTTP_RES_EXISTS, "File already exists.\n"); */
/* break; */
/* case ERROR_SIZE: */
/* send_error_reply (req, SEAF_HTTP_RES_TOOLARGE, "File size is too large.\n"); */
/* break; */
/* case ERROR_QUOTA: */
/* send_error_reply (req, SEAF_HTTP_RES_NOQUOTA, "Out of quota.\n"); */
/* break; */
/* case ERROR_NOT_EXIST: */
/* send_error_reply (req, SEAF_HTTP_RES_NOT_EXISTS, "File does not exist.\n"); */
/* break; */
/* case ERROR_RECV: */
/* case ERROR_INTERNAL: */
/* default: */
/* send_error_reply (req, EVHTP_RES_SERVERR, "Internal error.\n"); */
/* break; */
/* } */
/* } */

/* Build the JSON reply body for a successful update:
 * a one-element array [{"name":..., "id":..., "size":...}].
 * Returns a newly allocated string the caller must g_free().
 * (json_dumps() returns malloc'd memory, so it is copied into
 * glib-managed memory before being freed with free().) */
static char *
format_update_json_ret (const char *filename, const char *file_id, gint64 size)
{
    json_t *array, *obj;
    char *json_data;
    char *ret;

    array = json_array ();
    obj = json_object ();
    json_object_set_string_member (obj, "name", filename);
    json_object_set_string_member (obj, "id", file_id);
    json_object_set_int_member (obj, "size", size);
    json_array_append_new (array, obj);

    json_data = json_dumps (array, 0);
    json_decref (array);

    ret = g_strdup (json_data);
    free (json_data);
    return ret;
}

/* AJAX handler for updating (overwriting) an existing file.
 * Expects the multipart form to carry "target_file" (full repo path of the
 * file to replace), the uploaded temp file in fsm->files, and optionally
 * "last_modify" (RFC3339 timestamp used as the new mtime).
 * On success replies with a JSON array describing the new file revision. */
static void
update_ajax_cb(evhtp_request_t *req, void *arg)
{
    RecvFSM *fsm = arg;
    char *target_file, *parent_dir = NULL, *filename = NULL;
    char *last_modify = NULL;
    gint64 mtime = 0;
    const char *head_id = NULL;
    GError *error = NULL;
    int error_code = -1;
    char *new_file_id = NULL;
    gint64 size;

    /* CORS headers are always added so browser clients can call this
     * endpoint cross-origin. */
    evhtp_headers_add_header (req->headers_out,
                              evhtp_header_new("Access-Control-Allow-Headers",
                                               "x-requested-with, content-type, accept, origin, authorization", 1, 1));
    evhtp_headers_add_header (req->headers_out,
                              evhtp_header_new("Access-Control-Allow-Methods",
                                               "GET, POST, PUT, PATCH, DELETE, OPTIONS", 1, 1));
    evhtp_headers_add_header (req->headers_out,
                              evhtp_header_new("Access-Control-Allow-Origin",
                                               "*", 1, 1));
    evhtp_headers_add_header (req->headers_out,
                              evhtp_header_new("Access-Control-Max-Age",
                                               "86400", 1, 1));

    if (evhtp_request_get_method(req) == htp_method_OPTIONS) {
        /* If CORS preflight header, then create an empty body response (200 OK)
         * and return it. */
        send_success_reply (req);
        return;
    }

    /* fsm may be NULL or errored if upload_headers_cb/upload_read_cb failed;
     * the error reply has already been sent in that case. */
    if (!fsm || fsm->state == RECV_ERROR)
        return;

    if (!fsm->files) {
        seaf_debug ("[update] No file uploaded.\n");
        send_error_reply (req, EVHTP_RES_BADREQ, "No file uploaded.\n");
        return;
    }

    target_file = g_hash_table_lookup (fsm->form_kvs, "target_file");
    if (!target_file) {
        seaf_debug ("[Update] No target file given.\n");
        send_error_reply (req, EVHTP_RES_BADREQ, "No target file.");
        return;
    }

    last_modify = g_hash_table_lookup (fsm->form_kvs, "last_modify");
    if (last_modify) {
        mtime = rfc3339_to_timestamp (last_modify);
    }

    parent_dir = g_path_get_dirname (target_file);
    filename = g_path_get_basename (target_file);

    if (!check_parent_dir (req, fsm->repo_id, parent_dir))
        goto out;

    if (!check_tmp_file_list (fsm->files, &error_code))
        goto out;

    /* Stat the received temp file to report the stored size back to the
     * client in the JSON reply. */
    SeafStat st;
    char *tmp_file_path = fsm->files->data;
    if (seaf_stat (tmp_file_path, &st) < 0) {
        seaf_warning ("Failed to stat tmp file %s.\n", tmp_file_path);
        error_code = ERROR_INTERNAL;
        goto out;
    }
    size = (gint64)st.st_size;

    head_id = evhtp_kv_find (req->uri->query, "head");

    gint64 content_len = get_content_length (req);
    if (seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr,
                                                   fsm->repo_id,
                                                   content_len) != 0) {
        error_code = ERROR_QUOTA;
        goto out;
    }

    int rc = seaf_repo_manager_put_file (seaf->repo_mgr,
                                         fsm->repo_id,
                                         (char *)(fsm->files->data),
                                         parent_dir,
                                         filename,
                                         fsm->user,
                                         head_id,
                                         mtime,
                                         &new_file_id,
                                         &error);
    if (rc < 0) {
        error_code = ERROR_INTERNAL;
        if (error) {
            if (g_strcmp0 (error->message, "file does not exist") == 0) {
                error_code = ERROR_NOT_EXIST;
            } else if (error->code == SEAF_ERR_GC_CONFLICT) {
                /* GC conflict gets its own reply here; error_code is reset
                 * to -1 so send_reply_by_error_code() below is a no-op. */
                error_code = -1;
                send_error_reply (req, EVHTP_RES_CONFLICT, "GC Conflict.\n");
            }
            g_clear_error (&error);
        }
        goto out;
    }

    send_statistic_msg(fsm->repo_id, fsm->user, "web-file-upload", (guint64)content_len);

    /* Send back the new file revision info so the client can update its
     * local cache. */
    char *json_ret = format_update_json_ret (filename, new_file_id, size);
    evbuffer_add (req->buffer_out, json_ret, strlen(json_ret));
    send_success_reply (req);
    g_free (json_ret);

out:
    g_free (parent_dir);
    g_free (new_file_id);
    g_free (filename);
    send_reply_by_error_code (req, error_code);
    return;
}

/* static void upload_link_cb(evhtp_request_t *req, void *arg) { return upload_api_cb (req, arg); } */

/* Request-fini hook: tears down the per-request RecvFSM regardless of
 * whether the upload succeeded. Closes/removes temp files, releases the
 * progress-tracking entry, and frees all owned memory. */
static evhtp_res
upload_finish_cb (evhtp_request_t *req, void *arg)
{
    RecvFSM *fsm = arg;
    GList *ptr;

    seaf_metric_manager_in_flight_request_dec (seaf->metric_mgr);

    if (!fsm)
        return EVHTP_RES_OK;

    /* Clean up FSM struct no matter upload succeed or not. */

    g_free (fsm->parent_dir);
    g_free (fsm->user);
    g_free (fsm->boundary);
    g_free (fsm->input_name);
    g_free (fsm->token_type);

    g_hash_table_destroy (fsm->form_kvs);

    g_free (fsm->file_name);
    if (fsm->tmp_file) {
        close (fsm->fd);
        // For resumable upload, in case tmp file not be deleted
        if (fsm->rstart >= 0) {
            g_unlink (fsm->tmp_file);
        }
    }
    g_free (fsm->tmp_file);

    if (fsm->resumable_tmp_file) {
        /* rend == fsize - 1 means the last chunk of a resumable upload was
         * received, so the server-side tmp-file record can be dropped. */
        if (fsm->rstart >= 0 && fsm->rend == fsm->fsize - 1) {
            seaf_repo_manager_del_upload_tmp_file (seaf->repo_mgr,
                                                   fsm->repo_id,
                                                   fsm->resumable_tmp_file,
                                                   NULL);
        }
        g_free (fsm->resumable_tmp_file);
    }
    g_free (fsm->repo_id);

    if (!fsm->need_idx_progress) {
        for (ptr = fsm->files; ptr; ptr = ptr->next)
            g_unlink ((char *)(ptr->data));
    }
    string_list_free (fsm->filenames);
    string_list_free (fsm->files);

    evbuffer_free (fsm->line);

    if (fsm->progress_id) {
        pthread_mutex_lock (&pg_lock);
        g_hash_table_remove (upload_progress, fsm->progress_id);
        pthread_mutex_unlock (&pg_lock);

        /* fsm->progress has been free'd by g_hash_table_remove(). */
        g_free (fsm->progress_id);
    }

    g_free (fsm);

    return EVHTP_RES_OK;
}

/* Extract a double-quoted parameter value from a MIME header param
 * (e.g. filename="foo.txt" -> foo.txt). If the value is unquoted the
 * whole param string is returned. Returns newly allocated memory, or
 * NULL for a malformed (single-quote) param. */
static char *
get_mime_header_param_value (const char *param)
{
    char *first_quote, *last_quote;
    char *value;

    // param may not start with double quotes.
    first_quote = strchr (param, '\"');
    if (!first_quote) {
        return g_strdup (param);
    }
    last_quote = strrchr (param, '\"');
    if (!first_quote || !last_quote || first_quote == last_quote) {
        seaf_debug ("[upload] Invalid mime param %s.\n", param);
        return NULL;
    }

    value = g_strndup (first_quote + 1, last_quote - first_quote - 1);

    return value;
}

/* Parse the uploaded file name from the request-level Content-Disposition
 * header ("attachment; filename=..."), URI-unescaping the result.
 * Used for resumable uploads (see parse_mime_header). Returns newly
 * allocated string or NULL. */
static char *
parse_file_name_from_header (evhtp_request_t *req)
{
    const char *dispose = NULL;
    char **p;
    char **params;
    char *dec_file_name = NULL;

    dispose = evhtp_kv_find (req->headers_in, "Content-Disposition");
    if (!dispose)
        return NULL;

    params = g_strsplit (dispose, ";", 2);
    for (p = params; *p != NULL; ++p)
        *p = g_strstrip (*p);

    if (g_strv_length (params) != 2 ||
        strcasecmp (params[0], "attachment") != 0 ||
        strncasecmp (params[1], "filename", strlen("filename")) != 0) {
        seaf_warning ("[upload] Invalid Content-Disposition header.\n");
        g_strfreev (params);
        return NULL;
    }

    char *file_name = get_mime_header_param_value (params[1]);
    if (file_name)
        dec_file_name = g_uri_unescape_string (file_name, NULL);
    g_free (file_name);

    g_strfreev (params);
    return dec_file_name;
}

/* Parse one MIME part header line ("Header: value") inside the multipart
 * body. Only Content-Disposition is interpreted: it fills fsm->input_name
 * and, for the "file" field, fsm->file_name. The header string is modified
 * in place (the ':' is overwritten with NUL). Returns 0 on success, -1 on
 * malformed input. */
static int
parse_mime_header (evhtp_request_t *req, char *header, RecvFSM *fsm)
{
    char *colon;
    char **params, **p;

    colon = strchr (header, ':');
    if (!colon) {
        seaf_debug ("[upload] bad mime header format.\n");
        return -1;
    }

    *colon = 0;
    // Content-Disposition is case-insensitive.
    if (strcasecmp (header, "Content-Disposition") == 0) {
        params = g_strsplit (colon + 1, ";", 3);
        for (p = params; *p != NULL; ++p)
            *p = g_strstrip (*p);

        if (g_strv_length (params) < 2) {
            seaf_debug ("[upload] Too little params for mime header.\n");
            g_strfreev (params);
            return -1;
        }
        if (strcasecmp (params[0], "form-data") != 0) {
            seaf_debug ("[upload] Invalid Content-Disposition\n");
            g_strfreev (params);
            return -1;
        }

        for (p = params; *p != NULL; ++p) {
            if (strncasecmp (*p, "name", strlen("name")) == 0) {
                fsm->input_name = get_mime_header_param_value (*p);
                break;
            }
        }
        if (!fsm->input_name) {
            seaf_debug ("[upload] No input-name given.\n");
            g_strfreev (params);
            return -1;
        }

        if (strcmp (fsm->input_name, "file") == 0) {
            char *file_name;
            for (p = params; *p != NULL; ++p) {
                if (strncasecmp (*p, "filename", strlen("filename")) == 0) {
                    /* For resumable uploads (rstart >= 0) the real file name
                     * comes from the request-level Content-Disposition header,
                     * not the part header. */
                    if (fsm->rstart >= 0) {
                        file_name = parse_file_name_from_header (req);
                    } else {
                        file_name = get_mime_header_param_value (*p);
                    }
                    if (file_name) {
                        fsm->file_name = normalize_utf8_path (file_name);
                        if (!fsm->file_name)
                            seaf_debug ("File name is not valid utf8 encoding.\n");
                        g_free (file_name);
                    }
                    break;
                }
            }
            if (!fsm->file_name) {
                seaf_debug ("[upload] No filename given.\n");
                g_strfreev (params);
                return -1;
            }
        }
        g_strfreev (params);
    }

    return 0;
}

/* Create a unique temp file under the http temp dir for the incoming
 * file part; stores the fd in fsm->fd and the path in fsm->tmp_file.
 * Returns 0 on success, -1 on failure. */
static int
open_temp_file (RecvFSM *fsm)
{
    GString *temp_file = g_string_new (NULL);
    char *base_name = get_basename(fsm->file_name);

    g_string_printf (temp_file, "%s/%sXXXXXX",
                     seaf->http_server->http_temp_dir, base_name);
    g_free (base_name);

    fsm->fd = g_mkstemp (temp_file->str);
    if (fsm->fd < 0) {
        seaf_warning("[upload] Failed to open temp file: %s.\n", strerror(errno));
        g_string_free (temp_file, TRUE);
        return -1;
    }

    fsm->tmp_file = g_string_free (temp_file, FALSE);
    /* For clean up later.
*/ if (fsm->rstart < 0) { fsm->files = g_list_prepend (fsm->files, g_strdup(fsm->tmp_file)); } return 0; } static evhtp_res recv_form_field (RecvFSM *fsm, gboolean *no_line) { char *line, *norm_line; size_t len; *no_line = FALSE; line = evbuffer_readln (fsm->line, &len, EVBUFFER_EOL_CRLF_STRICT); if (line != NULL) { if (strstr (line, fsm->boundary) != NULL) { seaf_debug ("[upload] form field ends.\n"); g_free (fsm->input_name); fsm->input_name = NULL; fsm->state = RECV_HEADERS; } else { seaf_debug ("[upload] form field is %s.\n", line); norm_line = normalize_utf8_path (line); if (norm_line) { g_hash_table_insert (fsm->form_kvs, g_strdup(fsm->input_name), norm_line); } } free (line); } else { size_t size = evbuffer_get_length (fsm->line); if (size >= strlen(fsm->boundary) + 4) { struct evbuffer_ptr search_boundary = evbuffer_search (fsm->line, fsm->boundary, strlen(fsm->boundary), NULL); if (search_boundary.pos != -1) { seaf_debug ("[upload] form field ends.\n"); evbuffer_drain (fsm->line, size); g_free (fsm->input_name); fsm->input_name = NULL; fsm->state = RECV_HEADERS; } } *no_line = TRUE; } return EVHTP_RES_OK; } static evhtp_res add_uploaded_file (RecvFSM *fsm) { if (fsm->rstart < 0) { // Non breakpoint transfer, same as original /* In case of using NFS, the error may only occur in close(). 
*/ if (close (fsm->fd) < 0) { seaf_warning ("[upload] Failed to close temp file: %s\n", strerror(errno)); return EVHTP_RES_SERVERR; } fsm->filenames = g_list_prepend (fsm->filenames, get_basename(fsm->file_name)); g_free (fsm->file_name); g_free (fsm->tmp_file); fsm->file_name = NULL; fsm->tmp_file = NULL; fsm->recved_crlf = FALSE; } else { fsm->filenames = g_list_prepend (fsm->filenames, get_basename(fsm->file_name)); g_free (fsm->file_name); fsm->file_name = NULL; fsm->recved_crlf = FALSE; } return EVHTP_RES_OK; } static evhtp_res recv_file_data (RecvFSM *fsm, gboolean *no_line) { char *line; size_t len; *no_line = FALSE; line = evbuffer_readln (fsm->line, &len, EVBUFFER_EOL_CRLF_STRICT); if (!line) { // handle boundary size_t size = evbuffer_get_length (fsm->line); /* If we haven't read an entire line, but the line * buffer gets too long, flush the content to file, * or we reach the last boundary line (without CRLF at the end). * Since the last boundary line starts with "--" and ends with "--" * we have to add 4 bytes to the boundary size. 
*/ if (size >= strlen(fsm->boundary) + 4) { char *buf = g_new0 (char, size + 1); evbuffer_remove (fsm->line, buf, size); // strstr need a '\0' if (strstr(buf, fsm->boundary) != NULL) { seaf_debug ("[upload] file data ends.\n"); evhtp_res res = add_uploaded_file (fsm); if (res != EVHTP_RES_OK) { g_free(buf); return res; } g_free (fsm->input_name); fsm->input_name = NULL; fsm->state = RECV_HEADERS; } else { seaf_debug ("[upload] recv file data %d bytes.\n", size); if (fsm->recved_crlf) { if (writen (fsm->fd, "\r\n", 2) < 0) { seaf_warning ("[upload] Failed to write temp file: %s.\n", strerror(errno)); return EVHTP_RES_SERVERR; } } if (writen (fsm->fd, buf, size) < 0) { seaf_warning ("[upload] Failed to write temp file: %s.\n", strerror(errno)); g_free (buf); return EVHTP_RES_SERVERR; } fsm->recved_crlf = FALSE; } g_free(buf); } *no_line = TRUE; } else if (strstr (line, fsm->boundary) != NULL) { seaf_debug ("[upload] file data ends.\n"); evhtp_res res = add_uploaded_file (fsm); if (res != EVHTP_RES_OK) { free (line); return res; } g_free (fsm->input_name); fsm->input_name = NULL; fsm->state = RECV_HEADERS; free (line); } else { seaf_debug ("[upload] recv file data %d bytes.\n", len + 2); if (fsm->recved_crlf) { if (writen (fsm->fd, "\r\n", 2) < 0) { seaf_warning ("[upload] Failed to write temp file: %s.\n", strerror(errno)); return EVHTP_RES_SERVERR; } } if (writen (fsm->fd, line, len) < 0) { seaf_warning ("[upload] Failed to write temp file: %s.\n", strerror(errno)); free (line); return EVHTP_RES_SERVERR; } free (line); fsm->recved_crlf = TRUE; } return EVHTP_RES_OK; } /* Refer to https://www.w3.org/Protocols/rfc1341/7_2_Multipart.html and https://tools.ietf.org/html/rfc7578 Example multipart form-data request content format: --AaB03x Content-Disposition: form-data; name="submit-name" Larry --AaB03x Content-Disposition: form-data; name="file"; filename="file1.txt" Content-Type: text/plain ... contents of file1.txt ... 
   --AaB03x--
*/
/* Per-request evhtp read hook: feeds incoming body data into a small state
 * machine (RECV_INIT -> RECV_HEADERS -> RECV_CONTENT) that parses the
 * multipart/form-data stream. Always returns EVHTP_RES_OK to evhtp; on
 * parse/IO errors it sends the error reply itself and flips the FSM to
 * RECV_ERROR so later calls become no-ops. */
static evhtp_res
upload_read_cb (evhtp_request_t *req, evbuf_t *buf, void *arg)
{
    RecvFSM *fsm = arg;
    char *line;
    size_t len;
    gboolean no_line = FALSE;
    int res = EVHTP_RES_OK;

    if (fsm->state == RECV_ERROR)
        return EVHTP_RES_OK;

    /* Update upload progress. */
    if (fsm->progress) {
        fsm->progress->uploaded += (gint64)evbuffer_get_length(buf);

        seaf_debug ("progress: %lld/%lld\n",
                    fsm->progress->uploaded, fsm->progress->size);
    }

    evbuffer_add_buffer (fsm->line, buf);
    /* Drain the buffer so that evhtp don't copy it to another buffer
     * after this callback returns. */
    evbuffer_drain (buf, evbuffer_get_length (buf));

    /* Keep consuming complete lines until the parser needs more input. */
    while (!no_line) {
        switch (fsm->state) {
        case RECV_INIT:
            line = evbuffer_readln (fsm->line, &len, EVBUFFER_EOL_CRLF_STRICT);
            if (line != NULL) {
                seaf_debug ("[upload] boundary line: %s.\n", line);
                if (!strstr (line, fsm->boundary)) {
                    seaf_debug ("[upload] no boundary found in the first line.\n");
                    free (line);
                    res = EVHTP_RES_BADREQ;
                    goto out;
                } else {
                    fsm->state = RECV_HEADERS;
                    free (line);
                }
            } else {
                no_line = TRUE;
            }
            break;
        case RECV_HEADERS:
            line = evbuffer_readln (fsm->line, &len, EVBUFFER_EOL_CRLF_STRICT);
            if (line != NULL) {
                seaf_debug ("[upload] mime header line: %s.\n", line);
                if (len == 0) {
                    /* Read an blank line, headers end. */
                    free (line);
                    // Each part MUST contain a Content-Disposition header field
                    if (!fsm->input_name) {
                        res = EVHTP_RES_BADREQ;
                        goto out;
                    }
                    if (g_strcmp0 (fsm->input_name, "file") == 0) {
                        if (open_temp_file (fsm) < 0) {
                            seaf_warning ("[upload] Failed open temp file, errno:[%d]\n", errno);
                            res = EVHTP_RES_SERVERR;
                            goto out;
                        }
                    }
                    seaf_debug ("[upload] Start to recv %s.\n", fsm->input_name);
                    fsm->state = RECV_CONTENT;
                } else if (parse_mime_header (req, line, fsm) < 0) {
                    free (line);
                    res = EVHTP_RES_BADREQ;
                    goto out;
                } else {
                    free (line);
                }
            } else {
                no_line = TRUE;
            }
            break;
        case RECV_CONTENT:
            if (g_strcmp0 (fsm->input_name, "file") == 0)
                res = recv_file_data (fsm, &no_line);
            else
                res = recv_form_field (fsm, &no_line);

            if (res != EVHTP_RES_OK)
                goto out;

            break;
        }
    }

out:
    if (res != EVHTP_RES_OK) {
        /* Don't receive any data before the connection is closed. */
        //evhtp_request_pause (req);

        /* Set keepalive to 0. This will cause evhtp to close the
         * connection after sending the reply. */
        req->keepalive = 0;

        fsm->state = RECV_ERROR;
    }

    if (res == EVHTP_RES_BADREQ) {
        send_error_reply (req, EVHTP_RES_BADREQ, "Bad request.\n");
    } else if (res == EVHTP_RES_SERVERR) {
        send_error_reply (req, EVHTP_RES_SERVERR, "Internal server error\n");
    }

    return EVHTP_RES_OK;
}

/* Return a copy of the value after '=' in an HTTP header parameter
 * ("key=value"), or NULL if there is no '='. Caller frees. */
static char *
get_http_header_param_value (const char *param)
{
    char *equal;
    char *value;

    equal = strchr (param, '=');
    if (!equal) {
        seaf_debug ("[upload] Invalid http header param %s.\n", param);
        return NULL;
    }

    value = g_strdup (equal + 1);
    return value;
}

/* Extract the multipart boundary string from the Content-Type header
 * ("multipart/form-data; boundary=..."). Returns newly allocated string
 * or NULL if the header is missing/malformed. */
static char *
get_boundary (evhtp_headers_t *hdr)
{
    const char *content_type;
    char **params, **p;
    char *boundary = NULL;

    content_type = evhtp_kv_find (hdr, "Content-Type");
    if (!content_type) {
        seaf_debug ("[upload] Missing Content-Type header\n");
        return boundary;
    }

    params = g_strsplit (content_type, ";", 0);
    for (p = params; *p != NULL; ++p)
        *p = g_strstrip (*p);

    if (!params || g_strv_length (params) < 2) {
        seaf_debug ("[upload] Too little params Content-Type header\n");
        g_strfreev (params);
        return boundary;
    }
    if (strcasecmp (params[0], "multipart/form-data") != 0) {
        seaf_debug ("[upload] Invalid Content-Type\n");
        g_strfreev (params);
        return boundary;
    }

    for (p = params; *p != NULL; ++p) {
        if (strncasecmp (*p, "boundary", strlen("boundary")) == 0) {
            boundary = get_http_header_param_value (*p);
            break;
        }
    }
    g_strfreev (params);
    if (!boundary) {
        seaf_debug ("[upload] boundary not given\n");
    }

    return boundary;
}

/* Validate the web-access token and resolve it to repo/user information.
 * On success fills *repo_id, *user and (when the token's obj_id JSON
 * carries one) *parent_dir with newly allocated strings; *token_type gets
 * the token's op string if requested. *err_msg is set to a static string
 * on failure. Returns 0 on success, -1 on failure. */
static int
check_access_token (const char *token,
                    const char *url_op,
                    char **repo_id,
                    char **parent_dir,
                    char **user,
                    char **token_type,
                    char **err_msg)
{
    SeafileWebAccess *webaccess;
    const char *op;
    const char *_repo_id;
    const char *_obj_id;
    const char *_parent_dir;
    json_t *parent_dir_json;

    webaccess = (SeafileWebAccess *)
        seaf_web_at_manager_query_access_token (seaf->web_at_mgr, token);
    if (!webaccess) {
        *err_msg = "Access token not found.";
        return -1;
    }

    /* Reject uploads into repos that are not writable (e.g. read-only).
     * status == -1 (lookup failure) is deliberately let through. */
    _repo_id = seafile_web_access_get_repo_id (webaccess);
    int status = seaf_repo_manager_get_repo_status(seaf->repo_mgr, _repo_id);
    if (status != REPO_STATUS_NORMAL && status != -1) {
        *err_msg = "Repo status not writable.";
        g_object_unref (webaccess);
        return -1;
    }

    /* token with op = "upload" can only be used for "upload-*" operations;
     * token with op = "update" can only be used for "update-*" operations.
     */
    op = seafile_web_access_get_op (webaccess);
    if (token_type)
        *token_type = g_strdup (op);
    if (g_strcmp0(op, "upload-link") == 0)
        op = "upload";
    if (strncmp (url_op, op, strlen(op)) != 0) {
        *err_msg = "Operation does not match access token.";
        g_object_unref (webaccess);
        return -1;
    }

    *repo_id = g_strdup (_repo_id);
    *user = g_strdup (seafile_web_access_get_username (webaccess));

    /* The token's obj_id field may carry a JSON object with a "parent_dir"
     * member restricting where the upload may land. */
    _obj_id = seafile_web_access_get_obj_id (webaccess);
    parent_dir_json = json_loadb (_obj_id, strlen (_obj_id), 0, NULL);
    if (parent_dir_json) {
        _parent_dir = json_object_get_string_member (parent_dir_json, "parent_dir");
        if (_parent_dir){
            *parent_dir = g_strdup(_parent_dir);
        }
        json_decref (parent_dir_json);
    }

    g_object_unref (webaccess);
    return 0;
}

/* Parse a "Content-Range: bytes start-end/fsize" header for resumable
 * uploads. Returns TRUE when the header is absent (outputs untouched) or
 * well-formed (outputs filled), FALSE on any syntax/consistency error
 * (start > end or end >= fsize). */
static gboolean
parse_range_val (evhtp_headers_t *hdr, gint64 *rstart,
                 gint64 *rend, gint64 *rfsize)
{
    const char *tmp = evhtp_kv_find (hdr, "Content-Range");
    if (!tmp)
        return TRUE;

    char *next = NULL;
    gint64 start;
    gint64 end;
    gint64 fsize;

    if (strstr (tmp, "bytes") != tmp) {
        return FALSE;
    }
    tmp += strlen("bytes");
    while (tmp && *tmp == ' ') {
        tmp++;
    }

    start = strtoll (tmp, &next, 10);
    if ((start == 0 && next == tmp) || *next != '-') {
        return FALSE;
    }

    tmp = next + 1;
    end = strtoll (tmp, &next, 10);
    if ((end == 0 && next == tmp) || *next != '/') {
        return FALSE;
    }

    tmp = next + 1;
    fsize = strtoll (tmp, &next, 10);
    if ((fsize == 0 && next == tmp) || *next != '\0') {
        return FALSE;
    }

    if (start > end || end >= fsize) {
        return FALSE;
    }

    *rstart = start;
    *rend = end;
    *rfsize = fsize;

    return TRUE;
}

/* Read the optional X-Progress-ID query parameter; when present, also
 * require and parse the Content-Length header so the progress entry knows
 * the total size. Returns 0 on success (including "no progress id"),
 * -1 when a progress id is given without Content-Length. */
static int
get_progress_info (evhtp_request_t *req,
                   evhtp_headers_t *hdr,
                   gint64 *content_len,
                   char **progress_id)
{
    const char *content_len_str;
    const char *uuid;

    uuid = evhtp_kv_find (req->uri->query, "X-Progress-ID");
    /* If progress id is not given, we don't need content-length either.
 */
    if (!uuid)
        return 0;
    *progress_id = g_strdup(uuid);

    content_len_str = evhtp_kv_find (hdr, "Content-Length");
    if (!content_len_str) {
        seaf_debug ("[upload] Content-Length not found.\n");
        return -1;
    }
    *content_len = strtoll (content_len_str, NULL, 10);

    return 0;
}

/* evhtp headers hook shared by all upload/update endpoints. Validates the
 * access token and request headers, performs early quota / max-size checks,
 * allocates the RecvFSM that the read hook fills in, and registers the
 * per-request read/fini hooks. On error sends the reply itself and returns
 * EVHTP_RES_OK with keepalive disabled. */
static evhtp_res
upload_headers_cb (evhtp_request_t *req, evhtp_headers_t *hdr, void *arg)
{
    char **parts = NULL;
    char *token, *repo_id = NULL, *user = NULL;
    char *parent_dir = NULL;
    char *boundary = NULL;
    gint64 content_len;
    char *progress_id = NULL;
    char *err_msg = NULL;
    char *token_type = NULL;
    RecvFSM *fsm = NULL;
    Progress *progress = NULL;
    int error_code = EVHTP_RES_BADREQ;

    htp_method method = evhtp_request_get_method(req);
    if (method == htp_method_OPTIONS) {
        return EVHTP_RES_OK;
    }

    /* URL format: http://host:port/[upload|update]/?X-Progress-ID= */
    token = req->uri->path->file;
    if (!token) {
        seaf_debug ("[upload] No token in url.\n");
        err_msg = "No token in url";
        goto err;
    }

    parts = g_strsplit (req->uri->path->full + 1, "/", 0);
    if (!parts || g_strv_length (parts) < 2) {
        err_msg = "Invalid URL";
        goto err;
    }
    /* First path component ("upload-api", "update-aj", ...) must match the
     * operation the token was issued for. */
    char *url_op = parts[0];

    if (check_access_token (token, url_op, &repo_id, &parent_dir,
                            &user, &token_type, &err_msg) < 0) {
        error_code = EVHTP_RES_FORBIDDEN;
        goto err;
    }

    gint64 rstart = -1;
    gint64 rend = -1;
    gint64 fsize = -1;
    if (!parse_range_val (hdr, &rstart, &rend, &fsize)) {
        seaf_warning ("Invalid Seafile-Content-Range value.\n");
        err_msg = "Invalid Seafile-Content-Range";
        goto err;
    }

    if (method == htp_method_POST || method == htp_method_PUT) {
        /* NOTE: this local shadows the outer content_len on purpose —
         * it is only used for the early quota/size checks below. */
        gint64 content_len = get_content_length (req);
        if (fsize > 0) {
            content_len = fsize;
        }
        // Check whether the file to be uploaded would exceed the quota before receiving the body, in order to avoid unnecessarily receiving the body.
        // After receiving the body, the quota is checked again to handle cases where the Content-Length in the request header is missing, which could make the initial quota check inaccurate.
        if (seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr,
                                                       repo_id, content_len) != 0) {
            error_code = SEAF_HTTP_RES_NOQUOTA;
            err_msg = "Out of quota.\n";
            goto err;
        }

        if (seaf->max_upload_size > 0 && content_len > seaf->max_upload_size) {
            error_code = SEAF_HTTP_RES_TOOLARGE;
            err_msg = "File size is too large.\n";
            goto err;
        }
    }

    boundary = get_boundary (hdr);
    if (!boundary) {
        err_msg = "Wrong boundary in url";
        goto err;
    }

    if (get_progress_info (req, hdr, &content_len, &progress_id) < 0) {
        err_msg = "No progress info";
        goto err;
    }

    if (progress_id != NULL) {
        pthread_mutex_lock (&pg_lock);
        if (g_hash_table_lookup (upload_progress, progress_id)) {
            pthread_mutex_unlock (&pg_lock);
            err_msg = "Duplicate progress id.\n";
            goto err;
        }
        pthread_mutex_unlock (&pg_lock);
    }

    /* Ownership of repo_id/parent_dir/user/boundary/token_type transfers to
     * the FSM here; upload_finish_cb frees them. */
    fsm = g_new0 (RecvFSM, 1);
    fsm->boundary = boundary;
    fsm->repo_id = repo_id;
    fsm->parent_dir = parent_dir;
    fsm->user = user;
    fsm->token_type = token_type;
    fsm->rstart = rstart;
    fsm->rend = rend;
    fsm->fsize = fsize;
    fsm->line = evbuffer_new ();
    fsm->form_kvs = g_hash_table_new_full (g_str_hash, g_str_equal,
                                           g_free, g_free);
    /* const char *need_idx_progress = evhtp_kv_find (req->uri->query, "need_idx_progress"); */
    /* if (g_strcmp0(need_idx_progress, "true") == 0) */
    /*     fsm->need_idx_progress = TRUE; */
    fsm->need_idx_progress = FALSE;

    if (progress_id != NULL) {
        progress = g_new0 (Progress, 1);
        progress->size = content_len;
        fsm->progress_id = progress_id;
        fsm->progress = progress;

        pthread_mutex_lock (&pg_lock);
        g_hash_table_insert (upload_progress, g_strdup(progress_id), progress);
        pthread_mutex_unlock (&pg_lock);
    }

    seaf_metric_manager_in_flight_request_inc (seaf->metric_mgr);

    /* Set up per-request hooks, so that we can read file data piece by piece. */
    evhtp_set_hook (&req->hooks, evhtp_hook_on_read, upload_read_cb, fsm);
    evhtp_set_hook (&req->hooks, evhtp_hook_on_request_fini, upload_finish_cb, fsm);
    /* Set arg for upload_cb or update_cb. */
    req->cbarg = fsm;

    g_strfreev (parts);

    return EVHTP_RES_OK;

err:
    /* Don't receive any data before the connection is closed. */
    //evhtp_request_pause (req);

    /* Set keepalive to 0. This will cause evhtp to close the
     * connection after sending the reply. */
    req->keepalive = 0;

    send_error_reply (req, error_code, err_msg);
    g_free (repo_id);
    g_free (user);
    g_free (boundary);
    g_free (token_type);
    g_free (progress_id);
    g_strfreev (parts);
    return EVHTP_RES_OK;
}

/* static evhtp_res
upload_link_headers_cb (evhtp_request_t *req, evhtp_headers_t *hdr, void *arg)
{
    char **parts = NULL;
    char *token = NULL;
    const char *repo_id = NULL, *parent_dir = NULL;
    char *r_parent_dir = NULL;
    char *norm_parent_dir = NULL;
    char *user = NULL;
    char *boundary = NULL;
    gint64 content_len;
    char *progress_id = NULL;
    char *err_msg = NULL;
    RecvFSM *fsm = NULL;
    Progress *progress = NULL;
    int error_code = EVHTP_RES_BADREQ;
    SeafileShareLinkInfo *info = NULL;

    if (!seaf->seahub_pk) {
        seaf_warning ("No seahub private key is configured.\n");
        return EVHTP_RES_NOTFOUND;
    }

    if (evhtp_request_get_method(req) == htp_method_OPTIONS) {
        return EVHTP_RES_OK;
    }

    token = req->uri->path->file;
    if (!token) {
        seaf_debug ("[upload] No token in url.\n");
        err_msg = "No token in url";
        goto err;
    }

    parts = g_strsplit (req->uri->path->full + 1, "/", 0);
    if (!parts || g_strv_length (parts) < 2) {
        err_msg = "Invalid URL";
        goto err;
    }

    info = http_tx_manager_query_access_token (token, "upload");
    if (!info) {
        err_msg = "Access token not found\n";
        error_code = EVHTP_RES_FORBIDDEN;
        goto err;
    }

    repo_id = seafile_share_link_info_get_repo_id (info);
    parent_dir = seafile_share_link_info_get_parent_dir (info);
    if (!parent_dir) {
        err_msg = "No parent_dir\n";
        goto err;
    }
    norm_parent_dir = normalize_utf8_path (parent_dir);
    r_parent_dir = format_dir_path (norm_parent_dir);
    user = seaf_repo_manager_get_repo_owner (seaf->repo_mgr, repo_id);

    boundary = get_boundary (hdr);
    if (!boundary) {
        err_msg = "Wrong boundary in url";
        goto err;
    }

    if
    (get_progress_info (req, hdr, &content_len, &progress_id) < 0) {
        err_msg = "No progress info";
        goto err;
    }

    if (progress_id != NULL) {
        pthread_mutex_lock (&pg_lock);
        if (g_hash_table_lookup (upload_progress, progress_id)) {
            pthread_mutex_unlock (&pg_lock);
            err_msg = "Duplicate progress id.\n";
            goto err;
        }
        pthread_mutex_unlock (&pg_lock);
    }

    gint64 rstart = -1;
    gint64 rend = -1;
    gint64 fsize = -1;
    if (!parse_range_val (hdr, &rstart, &rend, &fsize)) {
        seaf_warning ("Invalid Seafile-Content-Range value.\n");
        err_msg = "Invalid Seafile-Content-Range";
        goto err;
    }

    fsm = g_new0 (RecvFSM, 1);
    fsm->boundary = boundary;
    fsm->repo_id = g_strdup (repo_id);
    fsm->parent_dir = r_parent_dir;
    fsm->user = user;
    fsm->token_type = "upload-link";
    fsm->rstart = rstart;
    fsm->rend = rend;
    fsm->fsize = fsize;
    fsm->line = evbuffer_new ();
    fsm->form_kvs = g_hash_table_new_full (g_str_hash, g_str_equal,
                                           g_free, g_free);
    // const char *need_idx_progress = evhtp_kv_find (req->uri->query, "need_idx_progress");
    // if (g_strcmp0(need_idx_progress, "true") == 0)
    //     fsm->need_idx_progress = TRUE;
    fsm->need_idx_progress = FALSE;

    if (progress_id != NULL) {
        progress = g_new0 (Progress, 1);
        progress->size = content_len;
        fsm->progress_id = progress_id;
        fsm->progress = progress;

        pthread_mutex_lock (&pg_lock);
        g_hash_table_insert (upload_progress, g_strdup(progress_id), progress);
        pthread_mutex_unlock (&pg_lock);
    }

    // Set up per-request hooks, so that we can read file data piece by piece.
    evhtp_set_hook (&req->hooks, evhtp_hook_on_read, upload_read_cb, fsm);
    evhtp_set_hook (&req->hooks, evhtp_hook_on_request_fini, upload_finish_cb, fsm);
    // Set arg for upload_cb or update_cb.
    req->cbarg = fsm;

    g_free (norm_parent_dir);
    g_strfreev (parts);
    g_object_unref (info);
    return EVHTP_RES_OK;

err:
    // Don't receive any data before the connection is closed.
    // evhtp_request_pause (req);
    // Set keepalive to 0. This will cause evhtp to close the
    // connection after sending the reply.
    req->keepalive = 0;
    send_error_reply (req, error_code, err_msg);
    g_free (norm_parent_dir);
    g_free (r_parent_dir);
    g_free (user);
    g_free (boundary);
    g_free (progress_id);
    g_strfreev (parts);
    if (info)
        g_object_unref (info);
    return EVHTP_RES_OK;
} */

/* GET handler reporting the progress of a background block-indexing task
 * identified by the "task_id" query parameter. Replies with the JSON
 * progress string produced by the index-blocks manager. */
static void
idx_progress_cb(evhtp_request_t *req, void *arg)
{
    const char *progress_id;

    progress_id = evhtp_kv_find (req->uri->query, "task_id");
    if (!progress_id) {
        seaf_debug ("[get pg] Index task id not found in url.\n");
        send_error_reply (req, EVHTP_RES_BADREQ, "task id not found");
        return;
    }

    char *progress_info = index_blocks_mgr_query_progress (seaf->index_blocks_mgr,
                                                           progress_id, NULL);
    if (!progress_info) {
        send_error_reply (req, EVHTP_RES_NOTFOUND, "Failed to get index progress");
        return;
    }

    evbuffer_add (req->buffer_out, progress_info, strlen(progress_info));
    send_success_reply (req);

    g_free (progress_info);
}

/* GET handler reporting upload progress for the "X-Progress-ID" query
 * parameter, wrapped in the caller-supplied JSONP "callback" function.
 * The progress table is shared with upload_read_cb, hence the pg_lock. */
static void
upload_progress_cb(evhtp_request_t *req, void *arg)
{
    const char *progress_id;
    const char *callback;
    Progress *progress;
    GString *buf;

    progress_id = evhtp_kv_find (req->uri->query, "X-Progress-ID");
    if (!progress_id) {
        seaf_debug ("[get pg] Progress id not found in url.\n");
        send_error_reply (req, EVHTP_RES_BADREQ, "Progress id not found");
        return;
    }

    callback = evhtp_kv_find (req->uri->query, "callback");
    if (!callback) {
        seaf_debug ("[get pg] callback not found in url.\n");
        send_error_reply (req, EVHTP_RES_BADREQ, "Callback not found");
        return;
    }

    pthread_mutex_lock (&pg_lock);
    progress = g_hash_table_lookup (upload_progress, progress_id);
    pthread_mutex_unlock (&pg_lock);

    if (!progress) {
        /* seaf_warning ("[get pg] No progress found for %s.\n", progress_id); */
        send_error_reply (req, EVHTP_RES_BADREQ, "No progress found.\n");
        return;
    }

    /* Return JSONP formated data. */
    buf = g_string_new (NULL);
    g_string_append_printf (buf,
                            "%s({\"uploaded\": %"G_GINT64_FORMAT", \"length\": %"G_GINT64_FORMAT"});",
                            callback, progress->uploaded, progress->size);
    evbuffer_add (req->buffer_out, buf->str, buf->len);

    seaf_debug ("JSONP: %s\n", buf->str);

    send_success_reply (req);
    g_string_free (buf, TRUE);
}

/* Register all upload/update HTTP endpoints on the evhtp instance and
 * create the temp directories and shared progress table. Returns 0 on
 * success, -1 if a temp directory cannot be created. */
int
upload_file_init (evhtp_t *htp, const char *http_temp_dir)
{
    evhtp_callback_t *cb;

    if (g_mkdir_with_parents (http_temp_dir, 0777) < 0) {
        seaf_warning ("Failed to create temp file dir %s.\n", http_temp_dir);
        return -1;
    }
    char *cluster_shared_dir = g_strdup_printf ("%s/cluster-shared", http_temp_dir);
    if (g_mkdir_with_parents (cluster_shared_dir, 0777) < 0) {
        seaf_warning ("Failed to create cluster shared dir %s.\n", cluster_shared_dir);
        g_free (cluster_shared_dir);
        return -1;
    }
    g_free (cluster_shared_dir);

    /* All upload/update routes share upload_headers_cb as the headers hook;
     * it authenticates the token and installs the body-reading hooks. */
    cb = evhtp_set_regex_cb (htp, "^/upload-api/.*", upload_api_cb, NULL);
    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_headers_cb, NULL);

    cb = evhtp_set_regex_cb (htp, "^/upload-raw-blks-api/.*", upload_raw_blks_api_cb, NULL);
    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_headers_cb, NULL);

    cb = evhtp_set_regex_cb (htp, "^/upload-blks-api/.*", upload_blks_api_cb, NULL);
    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_headers_cb, NULL);

    /* cb = evhtp_set_regex_cb (htp, "^/upload-blks-aj/.*", upload_blks_ajax_cb, NULL); */
    /* evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_headers_cb, NULL); */

    cb = evhtp_set_regex_cb (htp, "^/upload-aj/.*", upload_ajax_cb, NULL);
    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_headers_cb, NULL);

    cb = evhtp_set_regex_cb (htp, "^/update-api/.*", update_api_cb, NULL);
    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_headers_cb, NULL);

    cb = evhtp_set_regex_cb (htp, "^/update-blks-api/.*", update_blks_api_cb, NULL);
    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_headers_cb, NULL);

    /* cb = evhtp_set_regex_cb (htp, "^/update-blks-aj/.*",
update_blks_ajax_cb, NULL); */ /* evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_headers_cb, NULL); */ cb = evhtp_set_regex_cb (htp, "^/update-aj/.*", update_ajax_cb, NULL); evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_headers_cb, NULL); // upload links // cb = evhtp_set_regex_cb (htp, "^/u/.*", upload_link_cb, NULL); //evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_link_headers_cb, NULL); evhtp_set_regex_cb (htp, "^/upload_progress.*", upload_progress_cb, NULL); evhtp_set_regex_cb (htp, "^/idx_progress.*", idx_progress_cb, NULL); upload_progress = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, g_free); pthread_mutex_init (&pg_lock, NULL); return 0; } #endif ================================================ FILE: server/upload-file.h ================================================ #ifndef UPLOAD_FILE_H #define UPLOAD_FILE_H #ifdef HAVE_EVHTP int upload_file_init (evhtp_t *evhtp, const char *http_temp_dir); #endif #endif ================================================ FILE: server/virtual-repo.c ================================================ /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #include "common.h" #include "utils.h" #define DEBUG_FLAG SEAFILE_DEBUG_OTHER #include "log.h" #include #include #include "seafile-session.h" #include "commit-mgr.h" #include "branch-mgr.h" #include "repo-mgr.h" #include "fs-mgr.h" #include "seafile-error.h" #include "seafile-crypt.h" #include "merge-new.h" #include "seafile-error.h" #include "seaf-db.h" #include "diff-simple.h" #define MAX_RUNNING_TASKS 5 #define SCHEDULE_INTERVAL 1000 /* 1s */ typedef struct MergeTask { char repo_id[37]; } MergeTask; typedef struct MergeScheduler { pthread_mutex_t q_lock; GQueue *queue; GHashTable *running; CcnetJobManager *tpool; CcnetTimer *timer; } MergeScheduler; static MergeScheduler *scheduler = NULL; static void add_merge_task (const char *repo_id); static int save_virtual_repo_info (SeafRepoManager *mgr, const char 
*repo_id, const char *origin_repo_id,
                        const char *path, const char *base_commit)
{
    int ret = 0;

    if (seaf_db_statement_query (mgr->seaf->db,
                                 "INSERT INTO VirtualRepo (repo_id, origin_repo, path, base_commit) VALUES (?, ?, ?, ?)",
                                 4, "string", repo_id, "string", origin_repo_id,
                                 "string", path, "string", base_commit) < 0)
        ret = -1;

    return ret;
}

/* Create the repo object, initial commit, master branch and DB records for
 * a new virtual repo rooted at @root_id.  Encryption parameters (salt,
 * magic/pwd-hash, random key) are copied from the origin repo so the same
 * password unlocks both.  Returns 0 on success, -1 with @error set on
 * failure; partially created objects are released via the out: path. */
static int
do_create_virtual_repo (SeafRepoManager *mgr,
                        SeafRepo *origin_repo,
                        const char *repo_id,
                        const char *repo_name,
                        const char *repo_desc,
                        const char *root_id,
                        const char *user,
                        const char *passwd,
                        GError **error)
{
    SeafRepo *repo = NULL;
    SeafCommit *commit = NULL;
    SeafBranch *master = NULL;
    int ret = 0;

    repo = seaf_repo_new (repo_id, repo_name, repo_desc);

    repo->no_local_history = TRUE;
    if (passwd != NULL && passwd[0] != '\0') {
        repo->encrypted = TRUE;
        repo->enc_version = origin_repo->enc_version;
        /* enc_version >= 3 introduces a per-repo salt. */
        if (repo->enc_version >= 3)
            memcpy (repo->salt, origin_repo->salt, 64);
        if (origin_repo->pwd_hash_algo)
            repo->pwd_hash_algo = g_strdup (origin_repo->pwd_hash_algo);
        if (origin_repo->pwd_hash_params)
            repo->pwd_hash_params = g_strdup (origin_repo->pwd_hash_params);
        if (repo->pwd_hash_algo) {
            seafile_generate_pwd_hash (repo->enc_version, repo_id, passwd, repo->salt,
                                       repo->pwd_hash_algo, repo->pwd_hash_params,
                                       repo->pwd_hash);
            /* Legacy "magic" field mirrors the first 32 bytes of the hash. */
            memcpy (repo->magic, repo->pwd_hash, 32);
        } else
            seafile_generate_magic (repo->enc_version, repo_id, passwd,
                                    repo->salt, repo->magic);
        if (repo->enc_version >= 2)
            memcpy (repo->random_key, origin_repo->random_key, 96);
    }
    /* Virtual repos share fs and block store with origin repo and
     * have the same version as the origin.
     */
    repo->version = origin_repo->version;
    memcpy (repo->store_id, origin_repo->id, 36);

    commit = seaf_commit_new (NULL, repo->id,
                              root_id, /* root id */
                              user, /* creator */
                              EMPTY_SHA1, /* creator id */
                              repo_desc, /* description */
                              0); /* ctime */

    seaf_repo_to_commit (repo, commit);
    if (seaf_commit_manager_add_commit (seaf->commit_mgr, commit) < 0) {
        seaf_warning ("Failed to add commit.\n");
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Failed to add commit");
        ret = -1;
        goto out;
    }

    master = seaf_branch_new ("master", repo->id, commit->commit_id);
    if (seaf_branch_manager_add_branch (seaf->branch_mgr, master) < 0) {
        seaf_warning ("Failed to add branch.\n");
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Failed to add branch");
        ret = -1;
        goto out;
    }

    if (seaf_repo_set_head (repo, master) < 0) {
        seaf_warning ("Failed to set repo head.\n");
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Failed to set repo head.");
        ret = -1;
        goto out;
    }

    if (seaf_repo_manager_add_repo (mgr, repo) < 0) {
        seaf_warning ("Failed to add repo.\n");
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Failed to add repo.");
        ret = -1;
        goto out;
    }

    if (set_repo_commit_to_db (repo_id, repo_name, commit->ctime,
                               repo->version, repo->encrypted, user) < 0) {
        seaf_warning("Failed to add repo info.\n");
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Failed to add repo info");
        ret = -1;
        goto out;
    }

out:
    /* Release local references; managers keep their own copies. */
    if (repo)
        seaf_repo_unref (repo);
    if (commit)
        seaf_commit_unref (commit);
    if (master)
        seaf_branch_unref (master);

    return ret;
}

/* Queue an asynchronous size recomputation for @repo_id. */
static void
update_repo_size(const char *repo_id)
{
    schedule_repo_size_computation (seaf->size_sched, repo_id);
}

/* Return the repo_id of an existing virtual repo for (origin, path), or
 * NULL if none exists.  Caller frees the returned string. */
static char *
get_existing_virtual_repo (SeafRepoManager *mgr,
                           const char *origin_repo_id,
                           const char *path)
{
    char *sql = "SELECT repo_id FROM VirtualRepo WHERE origin_repo = ?
AND path = ?";
    return seaf_db_statement_get_string (mgr->seaf->db, sql,
                                         2, "string", origin_repo_id,
                                         "string", path);
}

/* Validate the origin repo (status, encryption version, password), resolve
 * @path to a directory id at the origin's head, record the VirtualRepo row
 * and create the virtual repo itself.  Returns the new repo id (caller
 * frees) or NULL with @error set. */
static char *
create_virtual_repo_common (SeafRepoManager *mgr,
                            const char *origin_repo_id,
                            const char *path,
                            const char *repo_name,
                            const char *repo_desc,
                            const char *owner,
                            const char *passwd,
                            GError **error)
{
    SeafRepo *origin_repo = NULL;
    SeafCommit *origin_head = NULL;
    char *repo_id = NULL;
    char *dir_id = NULL;

    origin_repo = seaf_repo_manager_get_repo (mgr, origin_repo_id);
    if (!origin_repo) {
        seaf_warning ("Failed to get origin repo %.10s\n", origin_repo_id);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Origin library not exists");
        return NULL;
    }

    if (origin_repo->status != REPO_STATUS_NORMAL) {
        seaf_warning("Status of repo %.8s is %d, can't create VirtualRepo\n",
                     origin_repo_id, origin_repo->status);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Unnormal repo status");
        seaf_repo_unref (origin_repo);
        return NULL;
    }

    if (origin_repo->encrypted) {
        /* Only enc_version >= 2 repos carry the per-file random key that a
         * virtual repo must share with its origin. */
        if (origin_repo->enc_version < 2) {
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                         "Library encryption version must be higher than 2");
            seaf_repo_unref (origin_repo);
            return NULL;
        }
        if (!passwd || passwd[0] == 0) {
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                         "Password is not set");
            seaf_repo_unref (origin_repo);
            return NULL;
        }
        /* Verify against the modern pwd-hash when present, otherwise fall
         * back to the legacy "magic" scheme. */
        if (origin_repo->pwd_hash_algo) {
            if (seafile_pwd_hash_verify_repo_passwd (origin_repo->enc_version,
                                                     origin_repo_id, passwd,
                                                     origin_repo->salt,
                                                     origin_repo->pwd_hash,
                                                     origin_repo->pwd_hash_algo,
                                                     origin_repo->pwd_hash_params) < 0) {
                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                             "Incorrect password");
                seaf_repo_unref (origin_repo);
                return NULL;
            }
        } else {
            if (seafile_verify_repo_passwd (origin_repo_id, passwd,
                                            origin_repo->magic,
                                            origin_repo->enc_version,
                                            origin_repo->salt) < 0) {
                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                             "Incorrect password");
                seaf_repo_unref (origin_repo);
                return NULL;
            }
        }
    }

    origin_head =
        seaf_commit_manager_get_commit (seaf->commit_mgr,
                                        origin_repo->id,
                                        origin_repo->version,
                                        origin_repo->head->commit_id);
    if (!origin_head) {
        seaf_warning ("Failed to get head commit %.8s of repo %s.\n",
                      origin_repo->head->commit_id, origin_repo->id);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Bad origin repo head");
        goto error;
    }

    dir_id = seaf_fs_manager_get_seafdir_id_by_path (seaf->fs_mgr,
                                                     origin_repo->store_id,
                                                     origin_repo->version,
                                                     origin_head->root_id,
                                                     path, NULL);
    if (!dir_id) {
        seaf_warning ("Path %s doesn't exist or is not a dir in repo %.10s.\n",
                      path, origin_repo_id);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad path");
        goto error;
    }

    repo_id = gen_uuid();

    /* Save virtual repo info before actually create the repo. */
    if (save_virtual_repo_info (mgr, repo_id, origin_repo_id,
                                path, origin_head->commit_id) < 0) {
        seaf_warning ("Failed to save virtual repo info for %.10s:%s",
                      origin_repo_id, path);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Internal error");
        goto error;
    }

    if (do_create_virtual_repo (mgr, origin_repo, repo_id, repo_name,
                                repo_desc, dir_id, owner, passwd, error) < 0)
        goto error;

    /* The size of virtual repo is non-zero at the beginning.
     */
    update_repo_size (repo_id);

    seaf_repo_unref (origin_repo);
    seaf_commit_unref (origin_head);
    g_free (dir_id);
    return repo_id;

error:
    seaf_repo_unref (origin_repo);
    seaf_commit_unref (origin_head);
    g_free (repo_id);
    g_free (dir_id);
    return NULL;
}

/* Normalize a virtual-repo path: ensure exactly one leading '/' and strip
 * all trailing slashes.  Caller frees the returned string.  NOTE: callers
 * must reject "/" first — it would normalize to the empty string. */
static char *
canonical_vrepo_path (const char *path)
{
    char *ret = NULL;
    if (path[0] != '/')
        ret = g_strconcat ("/", path, NULL);
    else
        ret = g_strdup(path);

    int len = strlen(ret);
    int i = len - 1;
    while (i >= 0 && ret[i] == '/')
        ret[i--] = 0;

    return ret;
}

/* Public entry: create (or return the existing) virtual repo for @path of
 * @origin_repo_id.  If the origin is itself virtual, the request is
 * re-rooted onto the real origin by prefixing the origin's own sub-path.
 * Ownership of the new repo is copied from the origin's owner. */
char *
seaf_repo_manager_create_virtual_repo (SeafRepoManager *mgr,
                                       const char *origin_repo_id,
                                       const char *path,
                                       const char *repo_name,
                                       const char *repo_desc,
                                       const char *owner,
                                       const char *passwd,
                                       GError **error)
{
    char *repo_id = NULL;
    char *orig_owner = NULL;
    char *canon_path = NULL;
    SeafVirtRepo *vrepo = NULL;
    char *r_origin_repo_id = NULL;
    char *r_path = NULL;

    if (g_strcmp0 (path, "/") == 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Invalid path");
        return NULL;
    }

    canon_path = canonical_vrepo_path (path);

    vrepo = seaf_repo_manager_get_virtual_repo_info (mgr, origin_repo_id);
    if (vrepo) {
        // virtual repo
        r_path = g_strconcat(vrepo->path, canon_path, NULL);
        r_origin_repo_id = g_strdup (vrepo->origin_repo_id);
        seaf_virtual_repo_info_free (vrepo);
        repo_id = get_existing_virtual_repo (mgr, r_origin_repo_id, r_path);
        if (repo_id) {
            g_free (r_origin_repo_id);
            g_free (r_path);
            g_free (canon_path);
            return repo_id;
        }
    } else {
        r_path = g_strdup (canon_path);
        r_origin_repo_id = g_strdup (origin_repo_id);
        repo_id = get_existing_virtual_repo (mgr, r_origin_repo_id, r_path);
        if (repo_id) {
            g_free (r_origin_repo_id);
            g_free (r_path);
            g_free (canon_path);
            return repo_id;
        }
    }

    orig_owner = seaf_repo_manager_get_repo_owner (mgr, r_origin_repo_id);

    repo_id = create_virtual_repo_common (mgr, r_origin_repo_id, r_path,
                                          repo_name, repo_desc,
                                          orig_owner, passwd, error);
    if (!repo_id) {
        goto out;
    }

    if (seaf_repo_manager_set_repo_owner (mgr, repo_id, orig_owner) < 0) {
seaf_warning ("Failed to set repo owner for %.10s.\n", repo_id);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Failed to set repo owner.");
        g_free (repo_id);
        repo_id = NULL;
    }

out:
    g_free (orig_owner);
    g_free (r_origin_repo_id);
    g_free (r_path);
    g_free (canon_path);
    return repo_id;
}

/* SeafDBRow callback: build one SeafVirtRepo from the current row and store
 * it through @p_vinfo.  Returns FALSE to stop after the first row. */
static gboolean
load_virtual_info (SeafDBRow *row, void *p_vinfo)
{
    SeafVirtRepo *vinfo;
    const char *repo_id, *origin_repo_id, *path, *base_commit;

    repo_id = seaf_db_row_get_column_text (row, 0);
    origin_repo_id = seaf_db_row_get_column_text (row, 1);
    path = seaf_db_row_get_column_text (row, 2);
    base_commit = seaf_db_row_get_column_text (row, 3);

    vinfo = g_new0 (SeafVirtRepo, 1);
    memcpy (vinfo->repo_id, repo_id, 36);
    memcpy (vinfo->origin_repo_id, origin_repo_id, 36);
    vinfo->path = g_strdup(path);
    memcpy (vinfo->base_commit, base_commit, 40);

    *((SeafVirtRepo **)p_vinfo) = vinfo;

    return FALSE;
}

/* Load the VirtualRepo row for @repo_id, or NULL if @repo_id is not a
 * virtual repo.  Caller frees with seaf_virtual_repo_info_free(). */
SeafVirtRepo *
seaf_repo_manager_get_virtual_repo_info (SeafRepoManager *mgr,
                                         const char *repo_id)
{
    char *sql;
    SeafVirtRepo *vinfo = NULL;

    sql = "SELECT repo_id, origin_repo, path, base_commit FROM VirtualRepo "
        "WHERE repo_id = ?";
    seaf_db_statement_foreach_row (seaf->db, sql, load_virtual_info, &vinfo,
                                   1, "string", repo_id);

    return vinfo;
}

/* Free a SeafVirtRepo returned by the lookup functions; NULL-safe. */
void
seaf_virtual_repo_info_free (SeafVirtRepo *vinfo)
{
    if (!vinfo)
        return;

    g_free (vinfo->path);
    g_free (vinfo);
}

/* TRUE iff @repo_id has a row in VirtualRepo.  DB errors are not
 * distinguished from "not found" (db_err is discarded). */
gboolean
seaf_repo_manager_is_virtual_repo (SeafRepoManager *mgr, const char *repo_id)
{
    gboolean db_err;

    char *sql = "SELECT 1 FROM VirtualRepo WHERE repo_id = ?";
    return seaf_db_statement_exists (seaf->db, sql, &db_err,
                                     1, "string", repo_id);
}

/* Find the id of the virtual repo for (origin_repo, path), optionally
 * restricted to @owner.  Returns NULL when not found; caller frees. */
char *
seaf_repo_manager_get_virtual_repo_id (SeafRepoManager *mgr,
                                       const char *origin_repo,
                                       const char *path,
                                       const char *owner)
{
    char *sql;
    char *ret;

    if (owner) {
        sql = "SELECT RepoOwner.repo_id FROM RepoOwner, VirtualRepo "
            "WHERE owner_id=? AND origin_repo=? AND path=? "
            "AND RepoOwner.repo_id = VirtualRepo.repo_id";
        ret = seaf_db_statement_get_string (mgr->seaf->db, sql,
                                            3, "string", owner,
                                            "string", origin_repo,
                                            "string", path);
    } else {
        sql = "SELECT repo_id FROM VirtualRepo "
            "WHERE origin_repo=? AND path=? ";
        ret = seaf_db_statement_get_string (mgr->seaf->db, sql,
                                            2, "string", origin_repo,
                                            "string", path);
    }

    return ret;
}

/* SeafDBRow callback: append column 0 (a repo id) to the GList at @data.
 * Returns TRUE to keep iterating. */
static gboolean
collect_virtual_repo_ids (SeafDBRow *row, void *data)
{
    GList **p_ids = data;
    const char *repo_id;

    repo_id = seaf_db_row_get_column_text (row, 0);
    *p_ids = g_list_prepend (*p_ids, g_strdup(repo_id));

    return TRUE;
}

/* Return the loaded SeafRepo objects of all virtual repos owned by @owner.
 * Repos that fail to load are silently skipped.  Caller unrefs each repo
 * and frees the list. */
GList *
seaf_repo_manager_get_virtual_repos_by_owner (SeafRepoManager *mgr,
                                              const char *owner,
                                              GError **error)
{
    GList *id_list = NULL, *ptr;
    GList *ret = NULL;
    char *sql;

    sql = "SELECT RepoOwner.repo_id FROM RepoOwner, VirtualRepo "
        "WHERE owner_id=? "
        "AND RepoOwner.repo_id = VirtualRepo.repo_id";

    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,
                                       collect_virtual_repo_ids, &id_list,
                                       1, "string", owner) < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "DB error");
        return NULL;
    }

    char *repo_id;
    SeafRepo *repo;
    for (ptr = id_list; ptr; ptr = ptr->next) {
        repo_id = ptr->data;
        repo = seaf_repo_manager_get_repo (mgr, repo_id);
        if (repo != NULL)
            ret = g_list_prepend (ret, repo);
    }

    string_list_free (id_list);
    return ret;
}

/* Return the ids (newly allocated strings) of all virtual repos whose
 * origin is @origin_repo, in DB order.  Caller frees list and strings. */
GList *
seaf_repo_manager_get_virtual_repo_ids_by_origin (SeafRepoManager *mgr,
                                                  const char *origin_repo)
{
    GList *ret = NULL;
    char *sql;

    sql = "SELECT repo_id FROM VirtualRepo WHERE origin_repo=?";
    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,
                                       collect_virtual_repo_ids, &ret,
                                       1, "string", origin_repo) < 0) {
        return NULL;
    }

    return g_list_reverse (ret);
}

/* SeafDBRow callback: append a full SeafVirtRepo record to the list at
 * @plist.  Returns TRUE to keep iterating. */
static gboolean
collect_virtual_info (SeafDBRow *row, void *plist)
{
    GList **pret = plist;
    SeafVirtRepo *vinfo;
    const char *repo_id, *origin_repo_id, *path, *base_commit;

    repo_id = seaf_db_row_get_column_text (row, 0);
    origin_repo_id = seaf_db_row_get_column_text (row, 1);
    path = seaf_db_row_get_column_text (row, 2);
    base_commit = seaf_db_row_get_column_text (row, 3);

    vinfo = g_new0 (SeafVirtRepo, 1);
    memcpy (vinfo->repo_id, repo_id, 36);
    memcpy (vinfo->origin_repo_id, origin_repo_id, 36);
    vinfo->path = g_strdup(path);
    memcpy (vinfo->base_commit, base_commit, 40);

    *pret = g_list_prepend (*pret, vinfo);

    return TRUE;
}

/* Return SeafVirtRepo records of every virtual repo based on @origin_repo.
 * Caller frees each with seaf_virtual_repo_info_free(). */
GList *
seaf_repo_manager_get_virtual_info_by_origin (SeafRepoManager *mgr,
                                              const char *origin_repo)
{
    GList *ret = NULL;
    char *sql;

    sql = "SELECT repo_id, origin_repo, path, base_commit "
        "FROM VirtualRepo WHERE origin_repo=?";
    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,
                                       collect_virtual_info, &ret,
                                       1, "string", origin_repo) < 0) {
        return NULL;
    }

    return g_list_reverse (ret);
}

/* Persist a virtual repo's new merge base commit and (possibly renamed)
 * path in the VirtualRepo table. */
static void
set_virtual_repo_base_commit_path (const char *vrepo_id,
                                   const char *base_commit_id,
                                   const char *new_path)
{
    seaf_db_statement_query (seaf->db,
                             "UPDATE VirtualRepo SET base_commit=?, path=? WHERE repo_id=?",
                             3, "string", base_commit_id,
                             "string", new_path,
                             "string", vrepo_id);
}

/* Queue merge tasks: if @repo_id is itself virtual, merge just that repo;
 * otherwise queue a merge for every virtual repo based on it, except
 * @exclude_repo (may be NULL).  Always returns 0. */
int
seaf_repo_manager_merge_virtual_repo (SeafRepoManager *mgr,
                                      const char *repo_id,
                                      const char *exclude_repo)
{
    GList *vrepos = NULL, *ptr;
    char *vrepo_id;
    int ret = 0;

    if (seaf_repo_manager_is_virtual_repo (mgr, repo_id)) {
        add_merge_task (repo_id);
        return 0;
    }

    vrepos = seaf_repo_manager_get_virtual_repo_ids_by_origin (mgr, repo_id);
    for (ptr = vrepos; ptr; ptr = ptr->next) {
        vrepo_id = ptr->data;
        if (g_strcmp0 (exclude_repo, vrepo_id) == 0)
            continue;
        add_merge_task (vrepo_id);
    }

    string_list_free (vrepos);
    return ret;
}

/*
 * If the missing virtual repo is renamed, update database entry;
 * otherwise delete the virtual repo.
 */
/* Called when @vinfo->path no longer exists at @head of @repo: diff @head
 * against its parent and walk the path from the deepest component upward
 * looking for a DIR_RENAMED entry matching the old directory id.  If a
 * rename is found, the VirtualRepo row is repointed at the new path (and
 * the repo itself renamed when the virtual root moved); otherwise the
 * virtual repo is deleted.  The new path is optionally returned through
 * @return_new_path (caller frees). */
static void
handle_missing_virtual_repo (SeafRepoManager *mgr,
                             SeafRepo *repo, SeafCommit *head,
                             SeafVirtRepo *vinfo, char **return_new_path)
{
    SeafCommit *parent = NULL;
    char *old_dir_id = NULL;
    GList *diff_res = NULL, *ptr;
    DiffEntry *de;

    parent = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                             head->repo_id,
                                             head->version,
                                             head->parent_id);
    if (!parent) {
        seaf_warning ("Failed to find commit %s:%s.\n",
                      head->repo_id, head->parent_id);
        return;
    }

    int rc = diff_commits (parent, head, &diff_res, TRUE);
    if (rc < 0) {
        seaf_warning ("Failed to diff commit %s to %s.\n",
                      parent->commit_id, head->commit_id);
        seaf_commit_unref (parent);
        return;
    }

    /* par_path starts as the full virtual path; each iteration strips one
     * trailing component into sub_path and retries the rename match on the
     * shorter prefix.  p walks backwards over '/' separators. */
    char *path = vinfo->path, *sub_path, *p, *par_path;
    gboolean is_renamed = FALSE;
    p = &path[strlen(path)];
    par_path = g_strdup(path);
    sub_path = NULL;

    while (1) {
        GError *error = NULL;
        old_dir_id = seaf_fs_manager_get_seafdir_id_by_path (seaf->fs_mgr,
                                                             repo->store_id,
                                                             repo->version,
                                                             parent->root_id,
                                                             par_path, &error);
        if (!old_dir_id) {
            /* Prefix didn't exist even before this commit: nothing to
             * rename against, so drop the virtual repo. */
            if (error && error->code == SEAF_ERR_PATH_NO_EXIST) {
                seaf_warning ("Failed to find %s under commit %s in repo %s.\n",
                              par_path, parent->commit_id, repo->store_id);
                seaf_debug ("Delete virtual repo %.10s.\n", vinfo->repo_id);
                seaf_repo_manager_del_virtual_repo (mgr, vinfo->repo_id);
                g_clear_error (&error);
            }
            goto out;
        }

        char de_id[41];
        char *new_path, *new_name;

        for (ptr = diff_res; ptr; ptr = ptr->next) {
            de = ptr->data;
            if (de->status == DIFF_STATUS_DIR_RENAMED) {
                rawdata_to_hex (de->sha1, de_id, 20);
                if (strcmp (de_id, old_dir_id) == 0) {
                    if (sub_path != NULL)
                        new_path = g_strconcat ("/", de->new_name, "/", sub_path, NULL);
                    else
                        new_path = g_strconcat ("/", de->new_name, NULL);
                    seaf_debug ("Updating path of virtual repo %s to %s.\n",
                                vinfo->repo_id, new_path);
                    set_virtual_repo_base_commit_path (vinfo->repo_id,
                                                       head->commit_id,
                                                       new_path);
                    if (return_new_path)
                        *return_new_path = g_strdup(new_path);
                    /* 'sub_path = NUll' means the virtual dir itself has been renamed,
                     * we need to make a new commit for the virtual repo */
                    if (sub_path == NULL) {
                        new_name = g_path_get_basename(new_path);
                        seaf_repo_manager_edit_repo (vinfo->repo_id,
                                                     new_name,
                                                     "Changed library name",
                                                     NULL, &error);
                        if (error) {
                            seaf_warning ("Failed to rename repo %s", new_name);
                            g_clear_error (&error);
                        }
                        g_free(new_name);
                    }
                    is_renamed = TRUE;
                    g_free (new_path);
                    break;
                }
            }
        }
        g_free (old_dir_id);

        if (is_renamed)
            break;

        /* Step p back to the previous '/'; stop at the path root. */
        while (--p != path && *p != '/');
        if (p == path)
            break;

        g_free (par_path);
        g_free (sub_path);
        par_path = g_strndup (path, p - path);
        sub_path = g_strdup (p + 1);
    }

    if (!is_renamed) {
        seaf_debug ("Delete virtual repo %.10s.\n", vinfo->repo_id);
        seaf_repo_manager_del_virtual_repo (mgr, vinfo->repo_id);
    }

out:
    g_free (par_path);
    g_free (sub_path);
    for (ptr = diff_res; ptr; ptr = ptr->next)
        diff_entry_free ((DiffEntry *)ptr->data);
    g_list_free (diff_res);
    seaf_commit_unref (parent);
}

/* Scan all virtual repos of @origin_repo_id; for each whose path no longer
 * exists at the origin's head, either follow a rename or delete it (see
 * handle_missing_virtual_repo). */
void
seaf_repo_manager_cleanup_virtual_repos (SeafRepoManager *mgr,
                                         const char *origin_repo_id)
{
    SeafRepo *repo = NULL;
    SeafCommit *head = NULL;
    GList *vinfo_list = NULL, *ptr;
    SeafVirtRepo *vinfo;
    SeafDir *dir;
    GError *error = NULL;

    repo = seaf_repo_manager_get_repo (mgr, origin_repo_id);
    if (!repo) {
        seaf_warning ("Failed to get repo %.10s.\n", origin_repo_id);
        goto out;
    }

    head = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                           repo->id,
                                           repo->version,
                                           repo->head->commit_id);
    if (!head) {
        seaf_warning ("Failed to get commit %s:%.8s.\n",
                      repo->id, repo->head->commit_id);
        goto out;
    }

    vinfo_list = seaf_repo_manager_get_virtual_info_by_origin (mgr,
                                                               origin_repo_id);
    for (ptr = vinfo_list; ptr; ptr = ptr->next) {
        vinfo = ptr->data;
        dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,
                                                   repo->store_id,
                                                   repo->version,
                                                   head->root_id,
                                                   vinfo->path,
                                                   &error);
        if (error) {
            if (error->code == SEAF_ERR_PATH_NO_EXIST) {
                handle_missing_virtual_repo (mgr, repo, head, vinfo, NULL);
            }
            g_clear_error (&error);
        } else
            seaf_dir_free (dir);
        seaf_virtual_repo_info_free (vinfo);
    }

out:
    seaf_repo_unref (repo);
seaf_commit_unref (head);
    g_list_free (vinfo_list);
}

/* Thread-pool job: synchronize one virtual repo with its origin.
 *
 * Computes three fs roots — the merge base (origin tree at vinfo->base_commit
 * under vinfo->path), the origin's current tree at that path, and the virtual
 * repo's current root — then fast-forwards whichever side is unchanged, or
 * performs a 3-way tree merge when both changed.  Returns @vtask so the
 * done-callback can identify the task. */
static void *
merge_virtual_repo (void *vtask)
{
    MergeTask *task = vtask;
    SeafRepoManager *mgr = seaf->repo_mgr;
    char *repo_id = task->repo_id;
    SeafVirtRepo *vinfo;
    SeafRepo *repo = NULL, *orig_repo = NULL;
    SeafCommit *head = NULL, *orig_head = NULL, *base = NULL;
    char *root = NULL, *orig_root = NULL, *base_root = NULL;
    char new_base_commit[41] = {0};
    int ret = 0;
    GError *error = NULL;

    /* repos */
    repo = seaf_repo_manager_get_repo (mgr, repo_id);
    if (!repo) {
        seaf_warning ("Failed to get virt repo %.10s.\n", repo_id);
        ret = -1;
        goto out;
    }

    /* vinfo is owned by repo; do not free it here. */
    vinfo = repo->virtual_info;

    orig_repo = seaf_repo_manager_get_repo (mgr, vinfo->origin_repo_id);
    if (!orig_repo) {
        seaf_warning ("Failed to get orig repo %.10s.\n", vinfo->origin_repo_id);
        ret = -1;
        goto out;
    }

    /* commits */
    head = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                           repo->id, repo->version,
                                           repo->head->commit_id);
    if (!head) {
        seaf_warning ("Failed to get commit %s:%.8s.\n",
                      repo->id, repo->head->commit_id);
        ret = -1;
        goto out;
    }

    orig_head = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                                orig_repo->id, orig_repo->version,
                                                orig_repo->head->commit_id);
    if (!orig_head) {
        seaf_warning ("Failed to get commit %s:%.8s.\n",
                      orig_repo->id, orig_repo->head->commit_id);
        ret = -1;
        goto out;
    }

    orig_root = seaf_fs_manager_get_seafdir_id_by_path (seaf->fs_mgr,
                                                        orig_repo->store_id,
                                                        orig_repo->version,
                                                        orig_head->root_id,
                                                        vinfo->path, &error);
    if (error && !g_error_matches(error, SEAFILE_DOMAIN, SEAF_ERR_PATH_NO_EXIST)) {
        seaf_warning ("Failed to get seafdir id by path in origin repo %.10s: %s.\n",
                      orig_repo->store_id, error->message);
        ret = -1;
        goto out;
    }
    if (!orig_root) {
        /* The virtual dir vanished from the origin: try to follow a rename;
         * if that fails the virtual repo has been deleted and we're done. */
        seaf_debug("Path %s not found in origin repo %.8s, delete or rename virtual repo %.8s\n",
                   vinfo->path, vinfo->origin_repo_id, repo_id);
        char *new_path = NULL;
        handle_missing_virtual_repo (mgr, orig_repo, orig_head, vinfo, &new_path);
        if (new_path != NULL) {
            orig_root =
                seaf_fs_manager_get_seafdir_id_by_path (seaf->fs_mgr,
                                                        orig_repo->store_id,
                                                        orig_repo->version,
                                                        orig_head->root_id,
                                                        new_path, NULL);
            g_free (new_path);
        }
        if (!orig_root)
            goto out;
    }

    base = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                           orig_repo->id, orig_repo->version,
                                           vinfo->base_commit);
    if (!base) {
        seaf_warning ("Failed to get commit %s:%.8s.\n",
                      orig_repo->id, vinfo->base_commit);
        ret = -1;
        goto out;
    }

    /* fs roots */
    root = head->root_id;

    base_root = seaf_fs_manager_get_seafdir_id_by_path (seaf->fs_mgr,
                                                        orig_repo->store_id,
                                                        orig_repo->version,
                                                        base->root_id,
                                                        vinfo->path, NULL);
    if (!base_root) {
        seaf_warning ("Cannot find seafdir for repo %.10s path %s.\n",
                      vinfo->origin_repo_id, vinfo->path);
        ret = -1;
        goto out;
    }

    if (strcmp (root, orig_root) == 0) {
        /* Nothing to merge. */
        seaf_debug ("Nothing to merge.\n");
    } else if (strcmp (base_root, root) == 0) {
        /* Origin changed, virtual repo not changed. */
        seaf_debug ("Origin changed, virtual repo not changed.\n");
        ret = seaf_repo_manager_update_dir (mgr, repo_id, "/",
                                            orig_root,
                                            orig_head->creator_name,
                                            head->commit_id,
                                            NULL, NULL);
        if (ret < 0) {
            seaf_warning ("Failed to update root of virtual repo %.10s.\n",
                          repo_id);
            goto out;
        }
        set_virtual_repo_base_commit_path (repo->id,
                                           orig_repo->head->commit_id,
                                           vinfo->path);
    } else if (strcmp (base_root, orig_root) == 0) {
        /* Origin not changed, virutal repo changed. */
        seaf_debug ("Origin not changed, virutal repo changed.\n");
        /* new_base_commit receives the id of the commit created on the
         * origin, which becomes this virtual repo's new merge base. */
        ret = seaf_repo_manager_update_dir (mgr, vinfo->origin_repo_id,
                                            vinfo->path,
                                            root,
                                            head->creator_name,
                                            orig_head->commit_id,
                                            new_base_commit, NULL);
        if (ret < 0) {
            seaf_warning ("Failed to update origin repo %.10s path %s.\n",
                          vinfo->origin_repo_id, vinfo->path);
            goto out;
        }
        set_virtual_repo_base_commit_path (repo->id, new_base_commit,
                                           vinfo->path);
        /* Since origin repo is updated, we have to merge it with other
         * virtual repos if necessary. But we don't need to merge with
         * the current virtual repo again.
         */
        seaf_repo_manager_cleanup_virtual_repos (mgr, vinfo->origin_repo_id);
        seaf_repo_manager_merge_virtual_repo (mgr, vinfo->origin_repo_id,
                                              repo_id);
    } else {
        /* Both origin and virtual repo are changed. */
        seaf_debug ("Both origin and virtual repo are changed.\n");

        MergeOptions opt;
        const char *roots[3];

        memset (&opt, 0, sizeof(opt));
        opt.n_ways = 3;
        memcpy (opt.remote_repo_id, repo_id, 36);
        memcpy (opt.remote_head, head->commit_id, 40);
        opt.do_merge = TRUE;

        roots[0] = base_root; /* base */
        roots[1] = orig_root; /* head */
        roots[2] = root; /* remote */

        /* Merge virtual into origin */
        if (seaf_merge_trees (orig_repo->store_id, orig_repo->version,
                              3, roots, &opt) < 0) {
            seaf_warning ("Failed to merge virtual repo %.10s.\n", repo_id);
            ret = -1;
            goto out;
        }

        seaf_debug ("Number of dirs visted in merge: %d.\n", opt.visit_dirs);

        /* Update virtual repo root. */
        ret = seaf_repo_manager_update_dir (mgr, repo_id, "/",
                                            opt.merged_tree_root,
                                            orig_head->creator_name,
                                            head->commit_id,
                                            NULL, NULL);
        if (ret < 0) {
            seaf_warning ("Failed to update root of virtual repo %.10s.\n",
                          repo_id);
            goto out;
        }

        /* Update origin repo path.
         */
        ret = seaf_repo_manager_update_dir (mgr, vinfo->origin_repo_id,
                                            vinfo->path,
                                            opt.merged_tree_root,
                                            head->creator_name,
                                            orig_head->commit_id,
                                            new_base_commit, NULL);
        if (ret < 0) {
            seaf_warning ("Failed to update origin repo %.10s path %s.\n",
                          vinfo->origin_repo_id, vinfo->path);
            goto out;
        }

        set_virtual_repo_base_commit_path (repo->id, new_base_commit,
                                           vinfo->path);

        seaf_repo_manager_cleanup_virtual_repos (mgr, vinfo->origin_repo_id);
        seaf_repo_manager_merge_virtual_repo (mgr, vinfo->origin_repo_id,
                                              repo_id);
    }

out:
    if (error)
        g_clear_error (&error);
    seaf_repo_unref (repo);
    seaf_repo_unref (orig_repo);
    seaf_commit_unref (head);
    seaf_commit_unref (orig_head);
    seaf_commit_unref (base);
    g_free (base_root);
    g_free (orig_root);
    return vtask;
}

/* Thread-pool completion callback: drop the finished task from the
 * running set (the hash table's destroy-notify frees the MergeTask). */
static void
merge_virtual_repo_done (void *vtask)
{
    MergeTask *task = vtask;

    seaf_debug ("Task %.8s done.\n", task->repo_id);

    g_hash_table_remove (scheduler->running, task->repo_id);
}

/* Timer callback (runs every SCHEDULE_INTERVAL ms): move queued merge
 * tasks into the thread pool, at most MAX_RUNNING_TASKS concurrently and
 * never two tasks for the same repo at once.  Always returns TRUE so the
 * timer keeps firing. */
static int
schedule_merge_tasks (void *vscheduler)
{
    MergeScheduler *scheduler = vscheduler;
    int n_running = g_hash_table_size (scheduler->running);
    MergeTask *task;

    /* seaf_debug ("Waiting tasks %d, running tasks %d.\n", */
    /*             g_queue_get_length (scheduler->queue), n_running); */

    if (n_running >= MAX_RUNNING_TASKS)
        return TRUE;

    pthread_mutex_lock (&scheduler->q_lock);

    while (n_running < MAX_RUNNING_TASKS) {
        task = g_queue_pop_head (scheduler->queue);
        if (!task)
            break;

        if (!g_hash_table_lookup (scheduler->running, task->repo_id)) {
            int ret = ccnet_job_manager_schedule_job (scheduler->tpool,
                                                      merge_virtual_repo,
                                                      merge_virtual_repo_done,
                                                      task);
            if (ret < 0) {
                /* Pool full or failed: requeue and retry next tick. */
                g_queue_push_tail (scheduler->queue, task);
                break;
            }
            g_hash_table_insert (scheduler->running,
                                 g_strdup(task->repo_id),
                                 task);
            n_running++;
            seaf_debug ("Run task for repo %.8s.\n", task->repo_id);
        } else {
            seaf_debug ("A task for repo %.8s is already running.\n",
                        task->repo_id);
            g_queue_push_tail (scheduler->queue, task);
            break;
        }
    }

    pthread_mutex_unlock (&scheduler->q_lock);

    return TRUE;
}

static
gint
task_cmp (gconstpointer a, gconstpointer b)
{
    /* Compare two MergeTask objects by repo id (used for queue lookup). */
    const MergeTask *task_a = a;
    const MergeTask *task_b = b;

    return strcmp (task_a->repo_id, task_b->repo_id);
}

/* Enqueue a merge task for @repo_id unless one is already waiting.
 * A task already *running* is not checked here; schedule_merge_tasks
 * handles that case when dispatching. */
static void
add_merge_task (const char *repo_id)
{
    MergeTask *task = g_new0 (MergeTask, 1);

    seaf_debug ("Add merge task for repo %.8s.\n", repo_id);

    memcpy (task->repo_id, repo_id, 36);

    pthread_mutex_lock (&scheduler->q_lock);
    if (g_queue_find_custom (scheduler->queue, task, task_cmp) != NULL) {
        seaf_debug ("Task for repo %.8s is already queued.\n", repo_id);
        g_free (task);
    } else
        g_queue_push_tail (scheduler->queue, task);
    pthread_mutex_unlock (&scheduler->q_lock);
}

/* Create the global merge scheduler: worker pool, task queue, running set
 * and the periodic dispatch timer.  Returns 0 on success. */
int
seaf_repo_manager_init_merge_scheduler ()
{
    scheduler = g_new0 (MergeScheduler, 1);
    /* NOTE(review): g_new0 aborts on OOM and never returns NULL, so this
     * check is effectively dead code; kept for safety. */
    if (!scheduler)
        return -1;

    pthread_mutex_init (&scheduler->q_lock, NULL);
    scheduler->queue = g_queue_new ();
    scheduler->running = g_hash_table_new_full (g_str_hash, g_str_equal,
                                                g_free, g_free);
    scheduler->tpool = ccnet_job_manager_new (MAX_RUNNING_TASKS);
    scheduler->timer = ccnet_timer_new (schedule_merge_tasks,
                                        scheduler,
                                        SCHEDULE_INTERVAL);
    return 0;
}

/* Force-reconcile a virtual repo with its origin via a 2-way tree merge
 * (no merge base), then push the merged tree to both sides.  Used as a
 * repair tool when the normal 3-way merge state is broken.
 * Returns 0 on success, -1 on failure. */
int
seaf_repo_manager_repair_virtual_repo (char *repo_id)
{
    SeafRepoManager *mgr = seaf->repo_mgr;
    SeafVirtRepo *vinfo = NULL;
    SeafRepo *repo = NULL, *orig_repo = NULL;
    SeafCommit *head = NULL, *orig_head = NULL;
    char *root = NULL, *orig_root = NULL;
    char new_base_commit[41] = {0};
    int ret = 0;
    GError *error = NULL;

    /* repos */
    repo = seaf_repo_manager_get_repo (mgr, repo_id);
    if (!repo) {
        seaf_warning ("Failed to get virt repo %.10s.\n", repo_id);
        ret = -1;
        goto out;
    }

    if (!repo->virtual_info) {
        seaf_warning ("Repo %.10s is not a virtual repo.\n", repo_id);
        ret = -1;
        goto out;
    }

    vinfo = seaf_repo_manager_get_virtual_repo_info (mgr, repo_id);
    if (!vinfo) {
        seaf_warning ("Failed to get virt repo info %.10s.\n", repo_id);
        ret = -1;
        goto out;
    }

    orig_repo = seaf_repo_manager_get_repo (mgr, vinfo->origin_repo_id);
    if (!orig_repo) {
        seaf_warning ("Failed to get orig repo %.10s.\n", vinfo->origin_repo_id);
        ret = -1;
        goto out;
    }

    /* commits */
    head = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                           repo->id, repo->version,
                                           repo->head->commit_id);
    if (!head) {
        seaf_warning ("Failed to get virtual repo commit %s:%.8s.\n",
                      repo->id, repo->head->commit_id);
        ret = -1;
        goto out;
    }

    orig_head = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                                orig_repo->id, orig_repo->version,
                                                orig_repo->head->commit_id);
    if (!orig_head) {
        seaf_warning ("Failed to get origin repo commit %s:%.8s.\n",
                      orig_repo->id, orig_repo->head->commit_id);
        ret = -1;
        goto out;
    }

    orig_root = seaf_fs_manager_get_seafdir_id_by_path (seaf->fs_mgr,
                                                        orig_repo->store_id,
                                                        orig_repo->version,
                                                        orig_head->root_id,
                                                        vinfo->path, &error);
    if (error && !g_error_matches(error, SEAFILE_DOMAIN, SEAF_ERR_PATH_NO_EXIST)) {
        seaf_warning ("Failed to get seafdir id by path in origin repo %.10s: %s.\n",
                      orig_repo->store_id, error->message);
        ret = -1;
        goto out;
    }
    if (!orig_root) {
        seaf_message("Path %s not found in origin repo %.8s, delete or rename virtual repo %.8s\n",
                     vinfo->path, vinfo->origin_repo_id, repo_id);
        goto out;
    }

    /* fs roots */
    root = head->root_id;

    MergeOptions opt;
    const char *roots[2];

    memset (&opt, 0, sizeof(opt));
    opt.n_ways = 2;
    memcpy (opt.remote_repo_id, repo_id, 36);
    memcpy (opt.remote_head, head->commit_id, 40);

    roots[0] = orig_root;
    roots[1] = root;

    /* Merge virtual into origin */
    if (seaf_merge_trees (orig_repo->store_id, orig_repo->version,
                          2, roots, &opt) < 0) {
        seaf_warning ("Failed to merge virtual repo %.10s.\n", repo_id);
        ret = -1;
        goto out;
    }

    seaf_debug ("Number of dirs visted in merge: %d.\n", opt.visit_dirs);

    /* Update virtual repo root. */
    ret = seaf_repo_manager_update_dir (mgr, repo_id, "/",
                                        opt.merged_tree_root,
                                        orig_head->creator_name,
                                        head->commit_id, NULL, NULL);
    if (ret < 0) {
        seaf_warning ("Failed to update root of virtual repo %.10s.\n", repo_id);
        goto out;
    }

    /* Update origin repo path.
     */
    ret = seaf_repo_manager_update_dir (mgr, vinfo->origin_repo_id,
                                        vinfo->path,
                                        opt.merged_tree_root,
                                        head->creator_name,
                                        orig_head->commit_id,
                                        new_base_commit, NULL);
    if (ret < 0) {
        seaf_warning ("Failed to update origin repo %.10s path %s.\n",
                      vinfo->origin_repo_id, vinfo->path);
        goto out;
    }

    set_virtual_repo_base_commit_path (repo->id, new_base_commit,
                                       vinfo->path);

out:
    if (error)
        g_clear_error (&error);
    seaf_virtual_repo_info_free (vinfo);
    seaf_repo_unref (repo);
    seaf_repo_unref (orig_repo);
    seaf_commit_unref (head);
    seaf_commit_unref (orig_head);
    g_free (orig_root);
    return ret;
}

================================================ FILE: server/web-accesstoken-mgr.c ================================================

/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

#include "common.h"
/* NOTE(review): the two bare #include lines below lost their <...> header
 * names in extraction (angle brackets stripped) — restore from upstream. */
#include
#include
#include "seafile-session.h"
#include "web-accesstoken-mgr.h"
#include "seafile-error.h"
#include "utils.h"
#include "log.h"

#define CLEANING_INTERVAL_MSEC 1000*300 /* 5 minutes */
#define TOKEN_EXPIRE_TIME 3600 /* 1 hour */
#define TOKEN_LEN 36

/* Private state of the web access-token manager. */
struct WebATPriv {
    GHashTable *access_token_hash; /* token -> access info */
    pthread_mutex_t lock;          /* guards access_token_hash */
    gboolean cluster_mode;
    struct ObjCache *cache;
};
typedef struct WebATPriv WebATPriv;

/* #define DEBUG 1 */

/* What a web access token grants: operation @op on @obj_id in @repo_id for
 * @username, valid until @expire_time; one-shot when use_onetime is set. */
typedef struct {
    char *repo_id;
    char *obj_id;
    char *op;
    char *username;
    long expire_time;
    gboolean use_onetime;
} AccessInfo;

/* Hash-table destroy-notify for AccessInfo values; NULL-safe. */
static void
free_access_info (AccessInfo *info)
{
    if (!info)
        return;
    g_free (info->repo_id);
    g_free (info->obj_id);
    g_free (info->op);
    g_free (info->username);
    g_free (info);
}

/* Allocate the token manager and its token hash table.  The table owns
 * both keys (tokens) and values (AccessInfo). */
SeafWebAccessTokenManager*
seaf_web_at_manager_new (SeafileSession *session)
{
    SeafWebAccessTokenManager *mgr = g_new0 (SeafWebAccessTokenManager, 1);

    mgr->seaf = session;

    mgr->priv = g_new0(WebATPriv, 1);
    mgr->priv->access_token_hash = g_hash_table_new_full (g_str_hash, g_str_equal,
                                                          g_free,
                                                          (GDestroyNotify)free_access_info);
    pthread_mutex_init (&mgr->priv->lock, NULL);

    return mgr;
}

static gboolean
remove_expire_info (gpointer key, gpointer value, gpointer user_data) { AccessInfo *info = (AccessInfo *)value; long now = *((long*)user_data); if (info && now >= info->expire_time) { return TRUE; } return FALSE; } static int clean_pulse (void *vmanager) { SeafWebAccessTokenManager *manager = vmanager; long now = (long)time(NULL); pthread_mutex_lock (&manager->priv->lock); g_hash_table_foreach_remove (manager->priv->access_token_hash, remove_expire_info, &now); pthread_mutex_unlock (&manager->priv->lock); return TRUE; } int seaf_web_at_manager_start (SeafWebAccessTokenManager *mgr) { ccnet_timer_new (clean_pulse, mgr, CLEANING_INTERVAL_MSEC); return 0; } static char * gen_new_token (GHashTable *token_hash) { char uuid[37]; char *token; while (1) { gen_uuid_inplace (uuid); token = g_strndup(uuid, TOKEN_LEN); /* Make sure the new token doesn't conflict with an existing one. */ if (g_hash_table_lookup (token_hash, token) != NULL) g_free (token); else return token; } } char * seaf_web_at_manager_get_access_token (SeafWebAccessTokenManager *mgr, const char *repo_id, const char *obj_id, const char *op, const char *username, int use_onetime, GError **error) { AccessInfo *info; long now = (long)time(NULL); long expire; char *t; SeafileWebAccess *webaccess; if (strcmp(op, "view") != 0 && strcmp(op, "download") != 0 && strcmp(op, "downloadblks") != 0 && strcmp(op, "download-dir") != 0 && strcmp(op, "download-multi") != 0 && strcmp(op, "download-link") != 0 && strcmp(op, "download-dir-link") != 0 && strcmp(op, "download-multi-link") != 0 && strcmp(op, "upload") != 0 && strcmp(op, "update") != 0 && strcmp(op, "upload-link") != 0 && strcmp(op, "upload-blks-api") != 0 && strcmp(op, "upload-blks-aj") != 0 && strcmp(op, "update-blks-api") != 0 && strcmp(op, "update-blks-aj") != 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Invalid operation type."); return NULL; } pthread_mutex_lock (&mgr->priv->lock); t = gen_new_token (mgr->priv->access_token_hash); expire = now + 
seaf->web_token_expire_time; info = g_new0 (AccessInfo, 1); info->repo_id = g_strdup (repo_id); info->obj_id = g_strdup (obj_id); info->op = g_strdup (op); info->username = g_strdup (username); info->expire_time = expire; if (use_onetime) { info->use_onetime = TRUE; } g_hash_table_insert (mgr->priv->access_token_hash, g_strdup(t), info); pthread_mutex_unlock (&mgr->priv->lock); #ifdef HAVE_EVHTP if (!seaf->go_fileserver) { if (strcmp(op, "download-dir") == 0 || strcmp(op, "download-multi") == 0 || strcmp(op, "download-dir-link") == 0 || strcmp(op, "download-multi-link") == 0) { webaccess = g_object_new (SEAFILE_TYPE_WEB_ACCESS, "repo_id", info->repo_id, "obj_id", info->obj_id, "op", info->op, "username", info->username, NULL); if (zip_download_mgr_start_zip_task (seaf->zip_download_mgr, t, webaccess, error) < 0) { pthread_mutex_lock (&mgr->priv->lock); g_hash_table_remove (mgr->priv->access_token_hash, t); pthread_mutex_unlock (&mgr->priv->lock); g_object_unref (webaccess); g_free (t); return NULL; } g_object_unref (webaccess); } } #endif return t; } SeafileWebAccess * seaf_web_at_manager_query_access_token (SeafWebAccessTokenManager *mgr, const char *token) { SeafileWebAccess *webaccess; AccessInfo *info; pthread_mutex_lock (&mgr->priv->lock); info = g_hash_table_lookup (mgr->priv->access_token_hash, token); pthread_mutex_unlock (&mgr->priv->lock); if (info != NULL) { long expire_time = info->expire_time; long now = (long)time(NULL); if (now - expire_time >= 0) { return NULL; } else { webaccess = g_object_new (SEAFILE_TYPE_WEB_ACCESS, "repo_id", info->repo_id, "obj_id", info->obj_id, "op", info->op, "username", info->username, NULL); if (info->use_onetime) { pthread_mutex_lock (&mgr->priv->lock); g_hash_table_remove (mgr->priv->access_token_hash, token); pthread_mutex_unlock (&mgr->priv->lock); } return webaccess; } } return NULL; } ================================================ FILE: server/web-accesstoken-mgr.h ================================================ 
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ #ifndef WEB_ACCESSTOKEN_MGR_H #define WEB_ACCESSTOKEN_MGR_H struct _SeafileSession; struct WebATPriv; struct _SeafWebAccessTokenManager { struct _SeafileSession *seaf; struct WebATPriv *priv; }; typedef struct _SeafWebAccessTokenManager SeafWebAccessTokenManager; SeafWebAccessTokenManager* seaf_web_at_manager_new (struct _SeafileSession *seaf); int seaf_web_at_manager_start (SeafWebAccessTokenManager *mgr); /* * Returns an access token for the given access info. * If a token doesn't exist or has expired, generate and return a new one. */ char * seaf_web_at_manager_get_access_token (SeafWebAccessTokenManager *mgr, const char *repo_id, const char *obj_id, const char *op, const char *username, int use_onetime, GError **error); /* * Returns access info for the given token. */ SeafileWebAccess * seaf_web_at_manager_query_access_token (SeafWebAccessTokenManager *mgr, const char *token); #endif /* WEB_ACCESSTOKEN_MGR_H */ ================================================ FILE: server/zip-download-mgr.c ================================================ #include "common.h" #ifdef HAVE_EVHTP #include #include #include #include "utils.h" #include "log.h" #include "seafile-error.h" #include "seafile-session.h" #include "pack-dir.h" #include "web-accesstoken-mgr.h" #include "zip-download-mgr.h" #define MAX_ZIP_THREAD_NUM 5 #define SCAN_PROGRESS_INTERVAL 24 * 3600 // 1 day #define PROGRESS_TTL 5 * 3600 // 5 hours #define DEFAULT_MAX_DOWNLOAD_DIR_SIZE 100 * 1000000 /* 100MB */ typedef struct ZipDownloadMgrPriv { pthread_mutex_t progress_lock; GHashTable *progress_store; GThreadPool *zip_tpool; // Abnormal behavior lead to no download request for the zip finished progress, // so related progress will not be removed, // this timer is used to scan progress and remove invalid progress. 
CcnetTimer *scan_progress_timer; } ZipDownloadMgrPriv; void free_progress (Progress *progress) { if (!progress) return; if (g_file_test (progress->zip_file_path, G_FILE_TEST_EXISTS)) { g_unlink (progress->zip_file_path); } g_free (progress->zip_file_path); g_free (progress); } typedef enum DownloadType { DOWNLOAD_DIR, DOWNLOAD_MULTI } DownloadType; typedef struct DownloadObj { char *token; DownloadType type; SeafRepo *repo; char *user; gboolean is_windows; // download-dir: top dir name; download-multi: "" char *dir_name; // download-dir: obj_id; download-multi: dirent list void *internal; Progress *progress; } DownloadObj; static void free_download_obj (DownloadObj *obj) { if (!obj) return; g_free (obj->token); seaf_repo_unref (obj->repo); g_free (obj->user); g_free (obj->dir_name); if (obj->type == DOWNLOAD_DIR) { g_free ((char *)obj->internal); } else { g_list_free_full ((GList *)obj->internal, (GDestroyNotify)seaf_dirent_free); } g_free (obj); } static void start_zip_task (gpointer data, gpointer user_data); static int scan_progress (void *data); static int get_download_file_count (DownloadObj *obj, GError **error); static gboolean validate_download_size (DownloadObj *obj, GError **error); ZipDownloadMgr * zip_download_mgr_new () { GError *error = NULL; ZipDownloadMgr *mgr = g_new0 (ZipDownloadMgr, 1); ZipDownloadMgrPriv *priv = g_new0 (ZipDownloadMgrPriv, 1); priv->zip_tpool = g_thread_pool_new (start_zip_task, priv, MAX_ZIP_THREAD_NUM, FALSE, &error); if (!priv->zip_tpool) { if (error) { seaf_warning ("Failed to create zip task thread pool: %s.\n", error->message); g_clear_error (&error); } else { seaf_warning ("Failed to create zip task thread pool.\n"); } g_free (priv); g_free (mgr); return NULL; } pthread_mutex_init (&priv->progress_lock, NULL); priv->progress_store = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, (GDestroyNotify)free_progress); priv->scan_progress_timer = ccnet_timer_new (scan_progress, priv, SCAN_PROGRESS_INTERVAL * 1000); 
mgr->priv = priv; return mgr; } static void remove_progress_by_token (ZipDownloadMgrPriv *priv, const char *token) { pthread_mutex_lock (&priv->progress_lock); g_hash_table_remove (priv->progress_store, token); pthread_mutex_unlock (&priv->progress_lock); } static int scan_progress (void *data) { time_t now = time(NULL); ZipDownloadMgrPriv *priv = data; GHashTableIter iter; gpointer key, value; Progress *progress; pthread_mutex_lock (&priv->progress_lock); g_hash_table_iter_init (&iter, priv->progress_store); while (g_hash_table_iter_next (&iter, &key, &value)) { progress = value; if (now >= progress->expire_ts) { g_hash_table_iter_remove (&iter); } } pthread_mutex_unlock (&priv->progress_lock); return TRUE; } static SeafileCrypt * get_seafile_crypt (SeafRepo *repo, const char *user) { SeafileCryptKey *key = NULL; char *key_hex, *iv_hex; unsigned char enc_key[32], enc_iv[16]; SeafileCrypt *crypt = NULL; key = seaf_passwd_manager_get_decrypt_key (seaf->passwd_mgr, repo->id, user); if (!key) { seaf_warning ("Failed to get derypt key for repo %.8s.\n", repo->id); return NULL; } g_object_get (key, "key", &key_hex, "iv", &iv_hex, NULL); if (repo->enc_version == 1) hex_to_rawdata (key_hex, enc_key, 16); else hex_to_rawdata (key_hex, enc_key, 32); hex_to_rawdata (iv_hex, enc_iv, 16); crypt = seafile_crypt_new (repo->enc_version, enc_key, enc_iv); g_free (key_hex); g_free (iv_hex); g_object_unref (key); return crypt; } static void start_zip_task (gpointer data, gpointer user_data) { DownloadObj *obj = data; ZipDownloadMgrPriv *priv = user_data; SeafRepo *repo = obj->repo; SeafileCrypt *crypt = NULL; int ret = 0; if (repo->encrypted) { crypt = get_seafile_crypt (repo, obj->user); if (!crypt) { ret = -1; goto out; } } if (!validate_download_size (obj, NULL)) { ret = -1; obj->progress->size_too_large = TRUE; goto out; } int file_count = get_download_file_count (obj, NULL); if (file_count < 0) { ret = -1; goto out; } obj->progress->total = file_count; ret = pack_files 
(repo->store_id, repo->version, obj->dir_name, obj->internal, crypt, obj->is_windows, obj->progress); out: if (crypt) { g_free (crypt); } if (ret == -1 && !obj->progress->canceled && !obj->progress->size_too_large) { obj->progress->internal_error = TRUE; } free_download_obj (obj); } static int parse_download_dir_data (DownloadObj *obj, const char *data) { json_t *jobj; json_error_t jerror; const char *dir_name; const char *obj_id; jobj = json_loadb (data, strlen(data), 0, &jerror); if (!jobj) { seaf_warning ("Failed to parse download dir data: %s.\n", jerror.text); return -1; } obj->is_windows = json_object_get_int_member (jobj, "is_windows"); dir_name = json_object_get_string_member (jobj, "dir_name"); if (!dir_name || strcmp (dir_name, "") == 0) { seaf_warning ("Invalid download dir data: miss dir_name filed.\n"); json_decref (jobj); return -1; } obj_id = json_object_get_string_member (jobj, "obj_id"); if (!obj_id || strcmp (obj_id, "") == 0) { seaf_warning ("Invalid download dir data: miss obj_id filed.\n"); json_decref (jobj); return -1; } obj->dir_name = g_strdup (dir_name); obj->internal = g_strdup (obj_id); json_decref (jobj); return 0; } static int parse_download_multi_data (DownloadObj *obj, const char *data) { json_t *jobj; SeafRepo *repo = obj->repo; const char *tmp_parent_dir; char *parent_dir; json_t *name_array; json_error_t jerror; int i; int len; const char *file_name; SeafDirent *dirent; SeafDir *dir; GList *dirent_list = NULL, *p = NULL; GError *error = NULL; jobj = json_loadb (data, strlen(data), 0, &jerror); if (!jobj) { seaf_warning ("Failed to parse download multi data: %s.\n", jerror.text); return -1; } obj->is_windows = json_object_get_int_member (jobj, "is_windows"); tmp_parent_dir = json_object_get_string_member (jobj, "parent_dir"); if (!tmp_parent_dir || strcmp (tmp_parent_dir, "") == 0) { seaf_warning ("Invalid download multi data, miss parent_dir field.\n"); json_decref (jobj); return -1; } name_array = json_object_get (jobj, 
"file_list"); if (!name_array) { seaf_warning ("Invalid download multi data, miss file_list field.\n"); json_decref (jobj); return -1; } len = json_array_size (name_array); if (len == 0) { seaf_warning ("Invalid download multi data, miss download file name.\n"); json_decref (jobj); return -1; } parent_dir = format_dir_path (tmp_parent_dir); dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr, repo->store_id, repo->version, repo->root_id, parent_dir, &error); if (!dir) { if (error) { seaf_warning ("Failed to get dir %s repo %.8s: %s.\n", parent_dir, repo->store_id, error->message); g_clear_error(&error); } else { seaf_warning ("dir %s doesn't exist in repo %.8s.\n", parent_dir, repo->store_id); } g_free (parent_dir); json_decref (jobj); return -1; } GHashTable *dirent_hash = g_hash_table_new(g_str_hash, g_str_equal); for (p = dir->entries; p; p = p->next) { SeafDirent *d = p->data; g_hash_table_insert(dirent_hash, d->name, d); } for (i = 0; i < len; i++) { file_name = json_string_value (json_array_get (name_array, i)); if (strcmp (file_name, "") == 0) { seaf_warning ("Invalid download file name: %s.\n", file_name); if (dirent_list) { g_list_free_full (dirent_list, (GDestroyNotify)seaf_dirent_free); dirent_list = NULL; } break; } // Packing files in multi-level directories. 
if (strchr (file_name, '/') != NULL) { char *fullpath = g_build_path ("/", parent_dir, file_name, NULL); dirent = seaf_fs_manager_get_dirent_by_path (seaf->fs_mgr, repo->store_id, repo->version, repo->root_id, fullpath, &error); if (!dirent) { if (error) { seaf_warning ("Failed to get path %s repo %.8s: %s.\n", fullpath, repo->store_id, error->message); g_clear_error(&error); } else { seaf_warning ("Path %s doesn't exist in repo %.8s.\n", parent_dir, repo->store_id); } if (dirent_list) { g_list_free_full (dirent_list, (GDestroyNotify)seaf_dirent_free); dirent_list = NULL; } g_free (fullpath); break; } g_free (fullpath); dirent_list = g_list_prepend (dirent_list, dirent); } else { dirent = g_hash_table_lookup (dirent_hash, file_name); if (!dirent) { seaf_warning ("Failed to get dirent for %s in dir %s in repo %.8s.\n", file_name, parent_dir, repo->store_id); if (dirent_list) { g_list_free_full (dirent_list, (GDestroyNotify)seaf_dirent_free); dirent_list = NULL; } break; } dirent_list = g_list_prepend (dirent_list, seaf_dirent_dup(dirent)); } } g_hash_table_unref(dirent_hash); g_free (parent_dir); json_decref (jobj); seaf_dir_free (dir); if (!dirent_list) { return -1; } obj->dir_name = g_strdup (""); obj->internal = dirent_list; return 0; } static gint64 calcuate_download_multi_size (SeafRepo *repo, GList *dirent_list) { GList *iter = dirent_list; SeafDirent *dirent; gint64 size; gint64 total_size = 0; for (; iter; iter = iter->next) { dirent = iter->data; if (S_ISREG(dirent->mode)) { if (repo->version > 0) { size = dirent->size; } else { size = seaf_fs_manager_get_file_size (seaf->fs_mgr, repo->store_id, repo->version, dirent->id); } if (size < 0) { seaf_warning ("Failed to get file %s size.\n", dirent->name); return -1; } total_size += size; } else if (S_ISDIR(dirent->mode)) { size = seaf_fs_manager_get_fs_size (seaf->fs_mgr, repo->store_id, repo->version, dirent->id); if (size < 0) { seaf_warning ("Failed to get dir %s size.\n", dirent->name); return -1; } 
total_size += size; } } return total_size; } static int calcuate_download_multi_file_count (SeafRepo *repo, GList *dirent_list) { GList *iter = dirent_list; SeafDirent *dirent; int cur_count; int count = 0; for (; iter; iter = iter->next) { dirent = iter->data; if (S_ISREG(dirent->mode)) { count += 1; } else if (S_ISDIR(dirent->mode)) { cur_count = seaf_fs_manager_count_fs_files (seaf->fs_mgr, repo->store_id, repo->version, dirent->id); if (cur_count < 0) { seaf_warning ("Failed to get dir %s file count.\n", dirent->name); return -1; } count += cur_count; } } return count; } static gboolean validate_download_size (DownloadObj *obj, GError **error) { SeafRepo *repo = obj->repo; gint64 download_size; gint64 max_download_dir_size; if (obj->type == DOWNLOAD_DIR) { download_size = seaf_fs_manager_get_fs_size (seaf->fs_mgr, repo->store_id, repo->version, (char *)obj->internal); } else { download_size = calcuate_download_multi_size (repo, (GList *)obj->internal); } /* default is MB */ max_download_dir_size = seaf_cfg_manager_get_config_int64 (seaf->cfg_mgr, "fileserver", "max_download_dir_size"); if (max_download_dir_size > 0) max_download_dir_size = max_download_dir_size * 1000000; else max_download_dir_size = DEFAULT_MAX_DOWNLOAD_DIR_SIZE; if (download_size < 0) { seaf_warning ("Failed to get download size.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to get download size."); return FALSE; } else if (download_size > max_download_dir_size) { seaf_warning ("Total download size %"G_GINT64_FORMAT ", exceed max download dir size %"G_GINT64_FORMAT".\n", download_size, max_download_dir_size); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Download size exceed max download dir size."); return FALSE; } return TRUE; } static int get_download_file_count (DownloadObj *obj, GError **error) { int file_count; SeafRepo *repo = obj->repo; if (obj->type == DOWNLOAD_DIR) { file_count = seaf_fs_manager_count_fs_files (seaf->fs_mgr, repo->store_id, 
repo->version, (char *)obj->internal); } else { file_count = calcuate_download_multi_file_count (repo, (GList *)obj->internal); } if (file_count < 0) { seaf_warning ("Failed to get download file count.\n"); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to get download file count."); return -1; } return file_count; } int zip_download_mgr_start_zip_task (ZipDownloadMgr *mgr, const char *token, SeafileWebAccess *info, GError **error) { const char *repo_id; const char *data; const char *operation; SeafRepo *repo; DownloadObj *obj; Progress *progress; int ret = 0; ZipDownloadMgrPriv *priv = mgr->priv; repo_id = seafile_web_access_get_repo_id (info); repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id); if (!repo) { seaf_warning ("Failed to get repo %.8s.\n", repo_id); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to get repo."); return -1; } data = seafile_web_access_get_obj_id (info); operation = seafile_web_access_get_op (info); obj = g_new0 (DownloadObj, 1); obj->token = g_strdup (token); obj->repo = repo; obj->user = g_strdup (seafile_web_access_get_username (info)); if (strcmp (operation, "download-dir") == 0 || strcmp (operation, "download-dir-link") == 0) { obj->type = DOWNLOAD_DIR; ret = parse_download_dir_data (obj, data); if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to parse download dir data."); goto out; } if (!seaf_fs_manager_object_exists (seaf->fs_mgr, repo->store_id, repo->version, (char *)obj->internal)) { seaf_warning ("Dir %s doesn't exist.\n", (char *)obj->internal); g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Dir doesn't exist."); ret = -1; goto out; } } else { obj->type = DOWNLOAD_MULTI; ret = parse_download_multi_data (obj, data); if (ret < 0) { g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Failed to parse download multi data."); goto out; } } progress = g_new0 (Progress, 1); /* Set to real total in worker thread. 
Here to just prevent the client from thinking * the zip has been finished too early. */ progress->total = 1; progress->expire_ts = time(NULL) + PROGRESS_TTL; obj->progress = progress; pthread_mutex_lock (&priv->progress_lock); g_hash_table_replace (priv->progress_store, g_strdup (token), progress); pthread_mutex_unlock (&priv->progress_lock); g_thread_pool_push (priv->zip_tpool, obj, NULL); out: if (ret < 0) { free_download_obj (obj); } return ret; } /* #define TOKEN_LEN 36 static char * gen_new_token (GHashTable *token_hash) { char uuid[37]; char *token; while (1) { gen_uuid_inplace (uuid); token = g_strndup(uuid, TOKEN_LEN); // Make sure the new token doesn't conflict with an existing one. if (g_hash_table_lookup (token_hash, token) != NULL) g_free (token); else return token; } } char * zip_download_mgr_start_zip_task_v2 (ZipDownloadMgr *mgr, const char *repo_id, const char *operation, const char *user, GList *dirent_list) { SeafRepo *repo = NULL; char *token = NULL; char *task_id = NULL; char *filename = NULL; DownloadObj *obj; Progress *progress; ZipDownloadMgrPriv *priv = mgr->priv; repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id); if (!repo) { seaf_warning ("Failed to get repo %s\n", repo_id); return NULL; } obj = g_new0 (DownloadObj, 1); obj->repo = repo; obj->user = g_strdup (user); if (strcmp (operation, "download-dir") == 0 || strcmp (operation, "download-dir-link") == 0) { obj->type = DOWNLOAD_DIR; SeafDirent *dent = dirent_list->data; obj->dir_name = g_strdup (dent->name); obj->internal = g_strdup (dent->id); filename = g_strdup (obj->dir_name); g_list_free_full (dirent_list, (GDestroyNotify)seaf_dirent_free); } else { obj->type = DOWNLOAD_MULTI; obj->dir_name = g_strdup(""); obj->internal = dirent_list; time_t now = time(NULL); char date_str[11]; strftime(date_str, sizeof(date_str), "%Y-%m-%d", localtime(&now)); filename = g_strconcat (MULTI_DOWNLOAD_FILE_PREFIX, date_str, NULL); } progress = g_new0 (Progress, 1); // Set to real total in 
worker thread. Here to just prevent the client from thinking // the zip has been finished too early. progress->total = 1; progress->expire_ts = time(NULL) + PROGRESS_TTL; progress->zip_file_name = filename; obj->progress = progress; pthread_mutex_lock (&priv->progress_lock); token = gen_new_token (priv->progress_store); g_hash_table_replace (priv->progress_store, token, progress); pthread_mutex_unlock (&priv->progress_lock); obj->token = g_strdup (token); task_id = g_strdup (token); g_thread_pool_push (priv->zip_tpool, obj, NULL); return task_id; } */ static Progress * get_progress_obj (ZipDownloadMgrPriv *priv, const char *token) { Progress *progress; pthread_mutex_lock (&priv->progress_lock); progress = g_hash_table_lookup (priv->progress_store, token); pthread_mutex_unlock (&priv->progress_lock); return progress; } char * zip_download_mgr_query_zip_progress (ZipDownloadMgr *mgr, const char *token, GError **error) { Progress *progress; json_t *obj; char *info; progress = get_progress_obj (mgr->priv, token); if (!progress) return NULL; obj = json_object (); json_object_set_int_member (obj, "zipped", g_atomic_int_get (&progress->zipped)); json_object_set_int_member (obj, "total", progress->total); if (progress->size_too_large) { json_object_set_int_member (obj, "failed", 1); json_object_set_string_member (obj, "failed_reason", "size too large"); } else if (progress->internal_error) { json_object_set_int_member (obj, "failed", 1); json_object_set_string_member (obj, "failed_reason", "internal error"); } else { json_object_set_int_member (obj, "failed", 0); json_object_set_string_member (obj, "failed_reason", ""); } if (progress->canceled) json_object_set_int_member (obj, "canceled", 1); else json_object_set_int_member (obj, "canceled", 0); if (progress->size_too_large || progress->canceled || progress->internal_error) remove_progress_by_token(mgr->priv, token); info = json_dumps (obj, JSON_COMPACT); json_decref (obj); return info; } char * 
zip_download_mgr_get_zip_file_path (struct ZipDownloadMgr *mgr,
                                    const char *token)
{
    Progress *progress;

    progress = get_progress_obj (mgr->priv, token);
    if (!progress) {
        return NULL;
    }

    /* Owned by the Progress entry; the caller must not free it, and
     * should use it before the entry is removed/expired. */
    return progress->zip_file_path;
}

/*
char *
zip_download_mgr_get_zip_file_name (struct ZipDownloadMgr *mgr,
                                    const char *token)
{
    Progress *progress;

    progress = get_progress_obj (mgr->priv, token);
    if (!progress) {
        return NULL;
    }

    return progress->zip_file_name;
}
*/

/* Remove the progress entry for 'token'. Freeing the entry also deletes
 * its temporary zip file (see free_progress). */
void
zip_download_mgr_del_zip_progress (ZipDownloadMgr *mgr, const char *token)
{
    remove_progress_by_token (mgr->priv, token);
}

/* Request cancellation of a running zip task. Sets the canceled flag on
 * the progress entry; presumably the packing code (pack_files) checks it
 * cooperatively — confirm against pack-dir.c. Always returns 0, even for
 * an unknown token. */
int
zip_download_mgr_cancel_zip_task (ZipDownloadMgr *mgr, const char *token)
{
    Progress *progress = get_progress_obj (mgr->priv, token);
    if (progress)
        progress->canceled = TRUE;
    return 0;
}

#endif

================================================
FILE: server/zip-download-mgr.h
================================================

#ifndef ZIP_DOWNLOAD_MGR_H
#define ZIP_DOWNLOAD_MGR_H

#ifdef HAVE_EVHTP

#include "seafile-object.h"

#define MULTI_DOWNLOAD_FILE_PREFIX "documents-export-"

struct ZipDownloadMgrPriv;

/* Manager for asynchronous "download folder / download multiple files as
 * zip" tasks. Tasks run on an internal thread pool and are tracked by
 * the same token used by the web access-token manager. */
typedef struct ZipDownloadMgr {
    struct ZipDownloadMgrPriv *priv;
} ZipDownloadMgr;

ZipDownloadMgr *
zip_download_mgr_new ();

/* Start a zip task for the access info behind 'token'.
 * Returns 0 on success, -1 with 'error' set on failure. */
int
zip_download_mgr_start_zip_task (ZipDownloadMgr *mgr,
                                 const char *token,
                                 SeafileWebAccess *info,
                                 GError **error);

char *
zip_download_mgr_start_zip_task_v2 (ZipDownloadMgr *mgr,
                                    const char *repo_id,
                                    const char *operation,
                                    const char *user,
                                    GList *dirent_list);

/* Returns a newly allocated JSON string describing the task state
 * (zipped/total counts, failure and cancellation flags), or NULL for an
 * unknown token. */
char *
zip_download_mgr_query_zip_progress (ZipDownloadMgr *mgr,
                                     const char *token,
                                     GError **error);

/* Returns the finished zip's path (owned by the manager), or NULL. */
char *
zip_download_mgr_get_zip_file_path (ZipDownloadMgr *mgr,
                                    const char *token);

char *
zip_download_mgr_get_zip_file_name (ZipDownloadMgr *mgr,
                                    const char *token);

void
zip_download_mgr_del_zip_progress (ZipDownloadMgr *mgr,
                                   const char *token);

int
zip_download_mgr_cancel_zip_task (ZipDownloadMgr *mgr,
                                  const char *token);

#endif

#endif

================================================
FILE: tests/__init__.py
================================================ ================================================ FILE: tests/conf/ccnet.conf ================================================ [General] USER_NAME = server ID = 8e4b13b49ca79f35732d9f44a0804940d985627c NAME = server SERVICE_URL = http://127.0.0.1 [Network] PORT = 10002 [Client] PORT = 9999 [Database] CREATE_TABLES = true ENGINE = mysql HOST = 127.0.0.1 USER = seafile PASSWD = seafile DB = ccnet_db CONNECTION_CHARSET=utf8 #[Database] #ENGINE = mysql #HOST = 127.0.0.1 #USER = seafile #PASSWD = root #DB = ccnet-db #CREATE_TABLES=true ================================================ FILE: tests/conf/mykey.peer ================================================ -----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAuZFwgxkKQGaqYyFMxIUz1JHnZPaOgEQ+fX/jRVYbGMiHkSbX K9X3XUHUGEjUt8b3zW6UZJGjgyV5S08YuaN0eE5z6Q6bnuWEhkTmgZgXaybc9Hiu y2WAHpKj+qbXcmewE0WEys/Ov9AIe0TRXmvL6r1793VcLSzgb/aIQA2WFg97DfEA hGAHo5BesKRfEEvXL6ZB9cGxXP9qIy0ObTvLXlOgbYchfV4rrXJk0u9xWjRyXABv 2Myv3fgxmGmTR+TAw2G5GCKeh9IoIuWVMGPyjSlERGMqQYymNz3NgyWFayyZ5HQS tihCnflOGEiMHRkOwIczB16YZhan2YqKpsjHGwIBIwKCAQEArvbXzBBLfoyvR4XM Cb9rYgXozOh3usQAZ7MYHM2HQ0C6VahHN/WgFhl+1RF4Gv1tTKoW4nqwHJEL9oxn xPkzTNxBZrYAcT7NaKdc/diLG+LQVDdFuHWkrxyL+vUUR0vR5kjcSjGlrYmhmMvb WQaNEIbFVwhA92TTnMPfjNmcI2wRKI1K9NEKDAMIPSwW/sgkls2h4KW3Y7DooJ0k l0apjN/rlaR4ohZp6oMVifW8GFY43Xau+4dIrYTnvvSyvGvtB+8cWuhqqvWHRZdM rFjgOJoZH5l0zxt2dYW2WFiqgT7xXsvu6L+nylXktEMxC33rehYdPrd427J409A6 caO5cwKBgQDyrBQ8UXu7cDAktiKTwH7+pA0wNyTvKsGYw0RcFILccpxty2r5gYhI eLFPVyjoYxwauW6vX3cSAYLKR+2PlYvkPpEvBQIJbaurx++ejez/KxYD65ZeFTfs Kb9A08hgMxCvJmnRvojhez1OZmmmWYPT57XeZXnCiNoyJWKA0mMNvwKBgQDDwn02 o5n7ugetXIlV1PiStVogPPTBobh9jsXooQFh4fB+lsrO082hapMlbVVNG1gLzvTY V0oDM/AzdnC6feZlAEdM+IcruinVnMnbnhiwPVDInCJIhvmJ/XScvkTsgHwRiAss Tlf8wH/uGXiaeVV/KMlkKRK6h54znTPq37/VpQKBgQDkziG1NuJgRTS05j3bxB/3 Z3omJV1Wh2YTsMtswuHIiVGpWWTcnrOyC2VZb2+2iVUDQR83oycfmwZJsYg27BYu +SnNPzxvSiWEtTJiS00rGf7QfwoeMUNbAspEb+jPux5b/6WZ34hfkXRRO/02cagu 
Mj3DDzhJtDtxG+8pAOEM9QKBgQC+KqWFiPv72UlJUpQKPJmzFpIQsD44cTbgXs7h +32viwbhX0irqS4nxp2SEnAfBJ6sYqS05xSyp3uftOKJRxpTfJ0I8W1drYe5kP6a 1Bf7qUcpRzc/JAhaKWn3Wb9MJQrPM7MVGOfCVJmINgAhCCcrEa2xwX/oZnxsp1cB a6RpIwKBgQDW15IebNwVOExTqtfh6UvIjMSrk9OoHDyjoPLI3eyPt3ujKdXFJ8qF CWg9ianQyE5Y8vfDI+x1YRCOwq2WapeXzkSO8CzVFHgz5kFqJQolr4+o6wr5mLLC +6iW9u81/X3bMAWshtNfsWbRSFLT1WNVTKRg+xO7YG/3wcyeIeqigA== -----END RSA PRIVATE KEY----- ================================================ FILE: tests/config.py ================================================ USER = 'testuser@test.seafile.com' PASSWORD = 'testuser' USER2 = 'testuser2@test.seafile.com' PASSWORD2 = 'testuser2' ADMIN_USER = 'adminuser@test.seafile.com' ADMIN_PASSWORD = 'adminuser' INACTIVE_USER = 'inactiveuser@test.seafile.com' INACTIVE_PASSWORD = 'inactiveuser' ================================================ FILE: tests/conftest.py ================================================ #coding: UTF-8 import logging import os import pytest from tenacity import retry, stop_after_attempt, wait_fixed from tests.config import ( ADMIN_PASSWORD, ADMIN_USER, INACTIVE_PASSWORD, INACTIVE_USER, PASSWORD, PASSWORD2, USER, USER2 ) from tests.utils import create_and_get_repo, randstring, create_and_get_group from seaserv import ccnet_api, seafile_api logger = logging.getLogger(__name__) @retry(wait=wait_fixed(2), stop=stop_after_attempt(10)) def wait_for_server(): seafile_api.get_repo_list(0, 1, None) @pytest.fixture(scope='session', autouse=True) def create_users(): """ Create an admin user and a normal user """ wait_for_server() logger.info('preparing users for testing') ccnet_api.add_emailuser(USER, PASSWORD, is_staff=False, is_active=True) ccnet_api.add_emailuser(USER2, PASSWORD2, is_staff=False, is_active=True) ccnet_api.add_emailuser( INACTIVE_USER, INACTIVE_PASSWORD, is_staff=False, is_active=False ) ccnet_api.add_emailuser( ADMIN_USER, ADMIN_PASSWORD, is_staff=True, is_active=True ) @pytest.yield_fixture(scope='function') def encrypted_repo(): 
repo = create_and_get_repo( 'test_repo_{}'.format(randstring(10)), '', USER, passwd='123' ) try: seafile_api.post_dir(repo.id, '/', 'dir1', USER) seafile_api.post_dir(repo.id, '/', 'dir2', USER) seafile_api.post_dir(repo.id, '/dir1', 'subdir1', USER) seafile_api.post_dir(repo.id, '/dir2', 'subdir2', USER) yield repo finally: if seafile_api.get_repo(repo.id): # The repo may be deleted in the test case seafile_api.remove_repo(repo.id) @pytest.yield_fixture(scope='function') def repo(): repo = create_and_get_repo( 'test_repo_{}'.format(randstring(10)), '', USER, passwd=None ) try: seafile_api.post_dir(repo.id, '/', 'dir1', USER) seafile_api.post_dir(repo.id, '/', 'dir2', USER) yield repo finally: if seafile_api.get_repo(repo.id): # The repo may be deleted in the test case seafile_api.remove_repo(repo.id) @pytest.yield_fixture(scope='function') def group(): group = create_and_get_group( 'test_group_{}'.format(randstring(10)), USER, gtype=None ) try: yield group finally: if ccnet_api.get_group(group.id): ccnet_api.remove_group(group.id) ================================================ FILE: tests/test_file_operation/test_file_operation.py ================================================ import pytest import os import time import json from tests.config import USER from seaserv import seafile_api as api file_name = 'test.txt' new_file_name = 'new_test.txt' new_file_name_2 = 'new_test_2.txt' empty_file_name = 'empty_test.txt' new_empty_file_name = 'new_empty_test.txt' file_content = 'test file content' file_path = os.getcwd() + '/' + file_name dir_name = "test_dir" def create_the_file (): with open(file_path, 'w') as fp: fp.write(file_content) @pytest.mark.parametrize('in_batch', [True, False]) def test_file_operation(in_batch): t_repo_version = 1 t_repo_id1 = api.create_repo('test_file_operation1', '', USER, passwd = None) create_the_file() # test post_file assert api.post_file(t_repo_id1, file_path, '/', file_name, USER) == 0 t_file_id = 
api.get_file_id_by_path(t_repo_id1, '/' + file_name) t_file_size = len(file_content) assert t_file_size == api.get_file_size(t_repo_id1, t_repo_version, t_file_id) # test post_dir assert api.post_dir(t_repo_id1, '/', dir_name, USER) == 0 # test copy_file (synchronize) t_copy_file_result1 = api.copy_file(t_repo_id1, '/', '[\"'+file_name+'\"]', t_repo_id1, '/', '[\"'+new_file_name+'\"]', USER, 0, 1) assert t_copy_file_result1 assert t_copy_file_result1.task_id is None assert not t_copy_file_result1.background t_file_id = api.get_file_id_by_path(t_repo_id1, '/' + new_file_name) assert t_file_size == api.get_file_size(t_repo_id1, t_repo_version, t_file_id) # test copy_file (asynchronous) t_repo_id2 = api.create_repo('test_file_operation2', '', USER, passwd = None) usage = api.get_user_self_usage (USER) api.set_user_quota(USER, usage + 1); t_copy_file_result2 = api.copy_file(t_repo_id1, '/', '[\"'+file_name+'\"]', t_repo_id2, '/', '[\"'+file_name+'\"]', USER, 1, 0) assert t_copy_file_result2 assert t_copy_file_result2.background while True: time.sleep(0.1) t_copy_task = api.get_copy_task(t_copy_file_result2.task_id) assert t_copy_task.failed assert t_copy_task.failed_reason == 'Quota is full' if t_copy_task.failed: break; api.set_user_quota(USER, -1); t_copy_file_result2 = api.copy_file(t_repo_id1, '/', '[\"'+file_name+'\"]', t_repo_id2, '/', '[\"'+file_name+'\"]', USER, 1, 0) assert t_copy_file_result2 assert t_copy_file_result2.task_id assert t_copy_file_result2.background while True: time.sleep(0.1) t_copy_task = api.get_copy_task(t_copy_file_result2.task_id) if t_copy_task.successful: break; t_file_id = api.get_file_id_by_path(t_repo_id2, '/' + file_name) assert t_file_size == api.get_file_size(t_repo_id2, t_repo_version, t_file_id) # test move_file (synchronize) t_move_file_info1 = api.get_dirent_by_path(t_repo_id1, '/' + new_file_name) t_move_file_result1 = api.move_file(t_repo_id1, '/', '[\"'+new_file_name+'\"]', t_repo_id1, '/' + dir_name, 
'[\"'+new_file_name+'\"]', 1, USER, 0, 1) assert t_move_file_result1 t_move_file_info2 = api.get_dirent_by_path(t_repo_id1, '/' + dir_name + '/' + new_file_name) assert t_move_file_info1.mtime == t_move_file_info2.mtime t_file_id = api.get_file_id_by_path(t_repo_id1, '/' + new_file_name) assert t_file_id is None # test move_file (synchronize) t_move_file_result1 = api.move_file(t_repo_id1, '/' + dir_name, '[\"'+new_file_name+'\"]', t_repo_id1, '/', '[\"'+new_file_name_2+'\"]', 1, USER, 0, 1) assert t_move_file_result1 t_file_id = api.get_file_id_by_path(t_repo_id1, '/' + dir_name + '/' + new_file_name) assert t_file_id is None # test move_file (asynchronous) usage = api.get_user_self_usage (USER) api.set_user_quota(USER, usage + 1); t_move_file_result2 = api.move_file(t_repo_id1, '/', '[\"'+file_name+'\"]', t_repo_id2, '/' , '[\"'+new_file_name+'\"]', 1, USER, 1, 0) assert t_move_file_result2 assert t_move_file_result2.task_id assert t_move_file_result2.background while True: time.sleep(0.1) t_move_task = api.get_copy_task(t_move_file_result2.task_id) assert t_move_task.failed assert t_move_task.failed_reason == 'Quota is full' if t_move_task.failed: break api.set_user_quota(USER, -1); t_move_file_result2 = api.move_file(t_repo_id1, '/', '[\"'+file_name+'\"]', t_repo_id2, '/' , '[\"'+new_file_name+'\"]', 1, USER, 1, 0) assert t_move_file_result2 assert t_move_file_result2.task_id assert t_move_file_result2.background while True: time.sleep(0.1) t_move_task = api.get_copy_task(t_move_file_result2.task_id) if t_move_task.successful: break t_file_id = api.get_file_id_by_path(t_repo_id2, '/' + new_file_name) assert t_file_size == api.get_file_size(t_repo_id2, t_repo_version, t_file_id) # test post_empty_file assert api.post_empty_file(t_repo_id1, '/' + dir_name, empty_file_name, USER) == 0 t_file_id = api.get_file_id_by_path(t_repo_id1, '/' + dir_name + '/' + empty_file_name) assert api.get_file_size(t_repo_id1, t_repo_version, t_file_id) == 0 # test rename_file assert 
api.rename_file(t_repo_id1, '/' + dir_name, empty_file_name, new_empty_file_name, USER) == 0 #test put_file t_new_file_id = api.put_file(t_repo_id1, file_path, '/' + dir_name, new_empty_file_name, USER, None) assert t_new_file_id # test get_file_revisions t_commit_list = api.get_file_revisions(t_repo_id2, None, '/' + file_name, 2) assert t_commit_list assert len(t_commit_list) == 2 assert t_commit_list[0].creator_name == USER # test del_file if in_batch: assert api.batch_del_files(t_repo_id2, '[\"'+'/'+file_name+'\"]', USER) == 0 else: assert api.del_file(t_repo_id2, '/', '[\"'+file_name+'\"]', USER) == 0 # test get_deleted t_deleted_file_list = api.get_deleted(t_repo_id2, 1) assert t_deleted_file_list assert len(t_deleted_file_list) == 2 assert t_deleted_file_list[0].obj_name == file_name assert t_deleted_file_list[0].basedir == '/' # test del a non-exist file. should return 0. if in_batch: file_list = ["/"+file_name, "/"+new_file_name] assert api.batch_del_files(t_repo_id2, json.dumps(file_list), USER) == 0 t_deleted_file_list = api.get_deleted(t_repo_id2, 1) assert t_deleted_file_list assert len(t_deleted_file_list) == 3 file_list = ["/"+dir_name+"/"+new_empty_file_name, "/"+dir_name+"/"+new_file_name, "/"+new_file_name_2] assert api.batch_del_files(t_repo_id1, json.dumps(file_list), USER) == 0 t_deleted_file_list = api.get_deleted(t_repo_id1, 1) assert t_deleted_file_list assert len(t_deleted_file_list) == 4 else: assert api.del_file(t_repo_id2, '/', '[\"'+file_name+'\"]', USER) == 0 assert api.del_file(t_repo_id1, '/' + dir_name, '[\"'+new_empty_file_name+'\"]', USER) == 0 assert api.del_file(t_repo_id1, '/' + dir_name, '[\"'+new_file_name+'\"]', USER) == 0 assert api.del_file(t_repo_id2, '/', '[\"'+new_file_name+'\"]', USER) == 0 assert api.del_file(t_repo_id1, '/', '[\"'+new_file_name_2+'\"]', USER) == 0 time.sleep(1) api.remove_repo(t_repo_id1) api.remove_repo(t_repo_id2) ================================================ FILE: 
tests/test_file_operation/test_merge_virtual_repo.py ================================================ import pytest import requests import os import time from tests.config import USER, USER2 from seaserv import seafile_api as api from requests_toolbelt import MultipartEncoder file_name = 'file.txt' file_name_not_replaced = 'file (1).txt' file_path = os.getcwd() + '/' + file_name file_content = 'File content.\r\n' file_size = len(file_content) resumable_file_name = 'resumable.txt' resumable_test_file_name = 'test/resumable.txt' chunked_part1_name = 'part1.txt' chunked_part2_name = 'part2.txt' chunked_part1_path = os.getcwd() + '/' + chunked_part1_name chunked_part2_path = os.getcwd() + '/' + chunked_part2_name chunked_part1_content = 'First line.\r\n' chunked_part2_content = 'Second line.\r\n' total_size = len(chunked_part1_content) + len(chunked_part2_content) #File_id is not used when upload files, but #the argument obj_id of get_fileserver_access_token shouldn't be NULL. file_id = '0000000000000000000000000000000000000000' def create_test_file(): fp = open(file_path, 'w') fp.close() fp = open(chunked_part1_path, 'w') fp.close() fp = open(chunked_part2_path, 'w') fp.close() def create_test_dir(repo, dir_name): parent_dir = '/' api.post_dir(repo.id,parent_dir,dir_name,USER) def assert_upload_response(response, replace, file_exist): assert response.status_code == 200 response_json = response.json() assert response_json[0]['size'] == 0 assert response_json[0]['id'] == file_id if file_exist and not replace: assert response_json[0]['name'] == file_name_not_replaced else: assert response_json[0]['name'] == file_name def assert_resumable_upload_response(response, repo_id, file_name, upload_complete): assert response.status_code == 200 if not upload_complete: assert response.text == '{"success": true}' offset = api.get_upload_tmp_file_offset(repo_id, '/' + file_name) assert offset == len(chunked_part1_content) else: response_json = response.json() assert 
response_json[0]['size'] == total_size new_file_id = response_json[0]['id'] assert len(new_file_id) == 40 and new_file_id != file_id assert response_json[0]['name'] == resumable_file_name def assert_update_response(response, is_json): assert response.status_code == 200 if is_json: response_json = response.json() assert response_json[0]['size'] == file_size new_file_id = response_json[0]['id'] assert len(new_file_id) == 40 and new_file_id != file_id assert response_json[0]['name'] == file_name else: new_file_id = response.text assert len(new_file_id) == 40 and new_file_id != file_id def request_resumable_upload(filepath, headers,upload_url_base,parent_dir,is_ajax): write_file(chunked_part1_path, chunked_part1_content) write_file(chunked_part2_path, chunked_part2_content) m = MultipartEncoder( fields={ 'parent_dir': parent_dir, 'file': (resumable_file_name, open(filepath, 'rb'), 'application/octet-stream') }) params = {'ret-json':'1'} headers["Content-type"] = m.content_type if is_ajax: response = requests.post(upload_url_base, headers = headers, data = m) else: response = requests.post(upload_url_base, headers = headers, data = m, params = params) return response def write_file(file_path, file_content): fp = open(file_path, 'w') fp.write(file_content) fp.close() def del_local_files(): os.remove(file_path) os.remove(chunked_part1_path) os.remove(chunked_part2_path) def test_merge_virtual_repo(repo): api.post_dir(repo.id, '/dir1', 'subdir1', USER) api.post_dir(repo.id, '/dir2', 'subdir2', USER) v_repo_id = api.share_subdir_to_user(repo.id, '/dir1', USER, USER2, 'rw') create_test_file() params = {'ret-json':'1'} obj_id = '{"parent_dir":"/"}' create_test_dir(repo,'test') #test upload file to vritual repo root dir. 
token = api.get_fileserver_access_token(v_repo_id, obj_id, 'upload', USER2, False) upload_url_base = 'http://127.0.0.1:8082/upload-api/' + token m = MultipartEncoder( fields={ 'parent_dir': '/', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, params = params, data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, False, False) time.sleep (1.5) repo_size = api.get_repo_size (v_repo_id) assert repo_size == 0 time.sleep (1.5) repo_size = api.get_repo_size (repo.id) assert repo_size == 0 #test resumable upload file to virtual repo root dir parent_dir = '/' headers = {'Content-Range':'bytes 0-{}/{}'.format(str(len(chunked_part1_content) - 1), str(total_size)), 'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)} response = request_resumable_upload(chunked_part1_path,headers, upload_url_base,parent_dir, False) assert_resumable_upload_response(response, v_repo_id, resumable_file_name, False) time.sleep (1.5) v_repo_size = api.get_repo_size (v_repo_id) assert v_repo_size == 0 time.sleep (1.5) repo_size = api.get_repo_size (repo.id) assert repo_size == 0 headers = {'Content-Range':'bytes {}-{}/{}'.format(str(len(chunked_part1_content)), str(total_size - 1), str(total_size)), 'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)} response = request_resumable_upload(chunked_part2_path, headers, upload_url_base, parent_dir, False) assert_resumable_upload_response(response, v_repo_id, resumable_file_name, True) time.sleep (2.5) v_repo_size = api.get_repo_size (v_repo_id) assert v_repo_size == total_size time.sleep (1.5) repo_size = api.get_repo_size (repo.id) assert repo_size == total_size #test update file to virtual repo. 
write_file(file_path, file_content) token = api.get_fileserver_access_token(v_repo_id, obj_id, 'update', USER2, False) update_url_base = 'http://127.0.0.1:8082/update-api/' + token m = MultipartEncoder( fields={ 'target_file': '/' + file_name, 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(update_url_base, data = m, headers = {'Content-Type': m.content_type}) assert_update_response(response, False) time.sleep (1.5) v_repo_size = api.get_repo_size (v_repo_id) assert v_repo_size == total_size + file_size time.sleep (1.5) repo_size = api.get_repo_size (repo.id) assert repo_size == total_size + file_size api.del_file(v_repo_id, '/', '[\"'+file_name+'\"]', USER2) time.sleep (1.5) v_repo_size = api.get_repo_size (v_repo_id) assert v_repo_size == total_size time.sleep (1.5) repo_size = api.get_repo_size (repo.id) assert repo_size == total_size api.del_file(v_repo_id, '/', '[\"'+resumable_file_name+'\"]', USER2) time.sleep (1.5) v_repo_size = api.get_repo_size (v_repo_id) assert v_repo_size == 0 time.sleep (1.5) repo_size = api.get_repo_size (repo.id) assert repo_size == 0 api.del_file(repo.id, '/dir1', '[\"subdir1\"]', USER) api.del_file(repo.id, '/dir2', '[\"subdir1\"]', USER) assert api.unshare_subdir_for_user(repo.id, '/dir1', USER, USER2) == 0 del_local_files() ================================================ FILE: tests/test_file_operation/test_search_files.py ================================================ import pytest import os import time from tests.config import USER from seaserv import seafile_api as api file_name = 'test.txt' file_content = 'test file content' file_path = os.getcwd() + '/' + file_name dir_name = "test_dir" def create_the_file (): with open(file_path, 'w') as fp: fp.write(file_content) def test_file_operation(): t_repo_version = 1 t_repo_id1 = api.create_repo('test_file_operation1', '', USER, passwd = None) create_the_file() assert api.post_file(t_repo_id1, file_path, '/', file_name, USER) == 
0 assert api.post_dir(t_repo_id1, '/', dir_name, USER) == 0 #test search files file_list = api.search_files (t_repo_id1, "test") assert len(file_list) == 2 assert file_list[0].path == "/test.txt" assert file_list[0].is_dir == False assert file_list[1].path == "/test_dir" assert file_list[1].is_dir == True file_list = api.search_files (t_repo_id1, "dir") assert len(file_list) == 1 assert file_list[0].path == "/test_dir" assert file_list[0].is_dir == True file_list = api.search_files (t_repo_id1, "DiR") assert len(file_list) == 1 assert file_list[0].path == "/test_dir" assert file_list[0].is_dir == True api.remove_repo(t_repo_id1) ================================================ FILE: tests/test_file_operation/test_upload_and_update.py ================================================ import pytest import requests import os import time from tests.config import USER from seaserv import seafile_api as api from requests_toolbelt import MultipartEncoder file_name = 'file.txt' file_name_not_replaced = 'file (1).txt' file_path = os.getcwd() + '/' + file_name file_content = 'File content.\r\n' file_size = len(file_content) resumable_file_name = 'resumable.txt' resumable_test_file_name = 'test/resumable.txt' chunked_part1_name = 'part1.txt' chunked_part2_name = 'part2.txt' chunked_part1_path = os.getcwd() + '/' + chunked_part1_name chunked_part2_path = os.getcwd() + '/' + chunked_part2_name chunked_part1_content = 'First line.\r\n' chunked_part2_content = 'Second line.\r\n' total_size = len(chunked_part1_content) + len(chunked_part2_content) #File_id is not used when upload files, but #the argument obj_id of get_fileserver_access_token shouldn't be NULL. 
file_id = '0000000000000000000000000000000000000000' def create_test_file(): fp = open(file_path, 'w') fp.close() fp = open(chunked_part1_path, 'w') fp.close() fp = open(chunked_part2_path, 'w') fp.close() def create_test_dir(repo, dir_name): parent_dir = '/' api.post_dir(repo.id,parent_dir,dir_name,USER) def assert_upload_response(response, replace, file_exist): assert response.status_code == 200 response_json = response.json() assert response_json[0]['size'] == 0 assert response_json[0]['id'] == file_id if file_exist and not replace: assert response_json[0]['name'] == file_name_not_replaced else: assert response_json[0]['name'] == file_name def assert_resumable_upload_response(response, repo_id, file_name, upload_complete): assert response.status_code == 200 if not upload_complete: assert response.text == '{"success": true}' offset = api.get_upload_tmp_file_offset(repo_id, '/' + file_name) assert offset == len(chunked_part1_content) else: response_json = response.json() assert response_json[0]['size'] == total_size new_file_id = response_json[0]['id'] assert len(new_file_id) == 40 and new_file_id != file_id assert response_json[0]['name'] == resumable_file_name def assert_update_response(response, is_json): assert response.status_code == 200 if is_json: response_json = response.json() assert response_json[0]['size'] == file_size new_file_id = response_json[0]['id'] assert len(new_file_id) == 40 and new_file_id != file_id assert response_json[0]['name'] == file_name else: new_file_id = response.text assert len(new_file_id) == 40 and new_file_id != file_id def request_resumable_upload(filepath, headers,upload_url_base,parent_dir,is_ajax): write_file(chunked_part1_path, chunked_part1_content) write_file(chunked_part2_path, chunked_part2_content) m = MultipartEncoder( fields={ 'parent_dir': parent_dir, 'file': (resumable_file_name, open(filepath, 'rb'), 'application/octet-stream') }) params = {'ret-json':'1'} headers["Content-type"] = m.content_type if is_ajax: 
response = requests.post(upload_url_base, headers = headers, data = m) else: response = requests.post(upload_url_base, headers = headers, data = m, params = params) return response def write_file(file_path, file_content): fp = open(file_path, 'w') fp.write(file_content) fp.close() def del_repo_files(repo_id): api.del_file(repo_id, '/', '[\"'+file_name+'\"]', USER) api.del_file(repo_id, '/', '[\"'+file_name_not_replaced+'\"]', USER) api.del_file(repo_id, '/', '[\"subdir\"]', USER) api.del_file(repo_id, '/', '[\"'+resumable_file_name+'\"]', USER) def del_local_files(): os.remove(file_path) os.remove(chunked_part1_path) os.remove(chunked_part2_path) def test_ajax(repo): create_test_file() create_test_dir(repo,'test') obj_id = '{"parent_dir":"/"}' #test upload file to test dir. token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False) upload_url_base = 'http://127.0.0.1:8082/upload-aj/'+ token m = MultipartEncoder( fields={ 'parent_dir': '/test', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, data = m, headers = {'Content-Type': m.content_type}) assert response.status_code == 403 #test upload file to root dir. token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False) upload_url_base = 'http://127.0.0.1:8082/upload-aj/'+ token m = MultipartEncoder( fields={ 'parent_dir': '/', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, False, False) time.sleep (1.5) repo_size = api.get_repo_size (repo.id) assert repo_size == 0 #test upload file to test dir when file already exists. 
m = MultipartEncoder( fields={ 'parent_dir': '/test', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, data = m, headers = {'Content-Type': m.content_type}) assert response.status_code == 403 #test upload file to root dir when file already exists. m = MultipartEncoder( fields={ 'parent_dir': '/', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, False, True) time.sleep (1.5) repo_size = api.get_repo_size (repo.id) assert repo_size == 0 #test upload file to subdir whose parent is test dir. m = MultipartEncoder( fields={ 'parent_dir': '/test', 'relative_path':'subdir', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, data = m, headers = {'Content-Type': m.content_type}) assert response.status_code == 403 #test upload file to subdir whose parent is root dir. m = MultipartEncoder( fields={ 'parent_dir': '/', 'relative_path':'subdir', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, False, False) time.sleep (1.5) repo_size = api.get_repo_size (repo.id) assert repo_size == 0 #test upload file to subdir whose parent is test dir when file already exists. m = MultipartEncoder( fields={ 'parent_dir': '/test', 'relative_path':'subdir', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, data = m, headers = {'Content-Type': m.content_type}) assert response.status_code == 403 #test upload file to subdir whose parent is root dir when file already exists. 
m = MultipartEncoder( fields={ 'parent_dir': '/', 'relative_path':'subdir', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, False, True) time.sleep (1.5) repo_size = api.get_repo_size (repo.id) assert repo_size == 0 #test resumable upload file to test dir parent_dir = '/test' headers = {'Content-Range':'bytes 0-{}/{}'.format(str(len(chunked_part1_content) - 1), str(total_size)), 'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)} response = request_resumable_upload(chunked_part1_path, headers, upload_url_base, parent_dir, True) assert_resumable_upload_response(response, repo.id, resumable_test_file_name, False) headers = {'Content-Range':'bytes {}-{}/{}'.format(str(len(chunked_part1_content)), str(total_size - 1), str(total_size)), 'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)} response = request_resumable_upload(chunked_part2_path, headers, upload_url_base, parent_dir, True) assert response.status_code == 403 #test resumable upload file to root dir parent_dir = '/' headers = {'Content-Range':'bytes 0-{}/{}'.format(str(len(chunked_part1_content) - 1), str(total_size)), 'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)} response = request_resumable_upload(chunked_part1_path, headers, upload_url_base, parent_dir, True) assert_resumable_upload_response(response, repo.id, resumable_file_name, False) repo_size = api.get_repo_size (repo.id) assert repo_size == 0 headers = {'Content-Range':'bytes {}-{}/{}'.format(str(len(chunked_part1_content)), str(total_size - 1), str(total_size)), 'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)} response = request_resumable_upload(chunked_part2_path, headers, upload_url_base, parent_dir, True) assert_resumable_upload_response(response, repo.id, 
resumable_file_name, True) time.sleep (2) repo_size = api.get_repo_size (repo.id) assert repo_size == total_size #test update file. write_file(file_path, file_content) token = api.get_fileserver_access_token(repo.id, obj_id, 'update', USER, False) update_url_base = 'http://127.0.0.1:8082/update-aj/' + token m = MultipartEncoder( fields={ 'target_file': '/' + file_name, 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(update_url_base, data = m, headers = {'Content-Type': m.content_type}) assert_update_response(response, True) time.sleep (1.5) repo_size = api.get_repo_size (repo.id) assert repo_size == total_size + file_size time.sleep(1) del_repo_files(repo.id) del_local_files() def test_api(repo): create_test_file() params = {'ret-json':'1'} obj_id = '{"parent_dir":"/"}' create_test_dir(repo,'test') #test upload file to test dir instead of root dir. token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False) upload_url_base = 'http://127.0.0.1:8082/upload-api/' + token m = MultipartEncoder( fields={ 'parent_dir': '/test', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, params = params, data = m, headers = {'Content-Type': m.content_type}) assert response.status_code == 403 #test upload file to root dir. params = {'ret-json':'1'} token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False) upload_url_base = 'http://127.0.0.1:8082/upload-api/' + token m = MultipartEncoder( fields={ 'parent_dir': '/', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, params = params, data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, False, False) time.sleep (1.5) repo_size = api.get_repo_size (repo.id) assert repo_size == 0 #test upload file to test dir instead of root dir when file already exists and replace is set. 
params = {'ret-json':'1'} m = MultipartEncoder( fields={ 'parent_dir': '/test', 'replace': '1', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, params = params, data = m, headers = {'Content-Type': m.content_type}) assert response.status_code == 403 #test upload file to root dir when file already exists and replace is set. params = {'ret-json':'1'} m = MultipartEncoder( fields={ 'parent_dir': '/', 'replace': '1', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, params = params, data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, True, True) time.sleep (1.5) repo_size = api.get_repo_size (repo.id) assert repo_size == 0 #test upload file to test dir instead of root dir when file already exists and replace is unset. params = {'ret-json':'1'} m = MultipartEncoder( fields={ 'parent_dir': '/test', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, params = params, data = m, headers = {'Content-Type': m.content_type}) assert response.status_code == 403 #test upload file to root dir when file already exists and replace is unset. params = {'ret-json':'1'} m = MultipartEncoder( fields={ 'parent_dir': '/', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, params = params, data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, False, True) time.sleep (1.5) repo_size = api.get_repo_size (repo.id) assert repo_size == 0 #test upload the file to subdir whose parent is test. 
params = {'ret-json':'1'} m = MultipartEncoder( fields={ 'parent_dir': '/test', 'relative_path': 'subdir', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, params = params, data = m, headers = {'Content-Type': m.content_type}) assert response.status_code == 403 #test upload the file to subdir. params = {'ret-json':'1'} m = MultipartEncoder( fields={ 'parent_dir': '/', 'relative_path': 'subdir', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, params = params, data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, False, False) time.sleep (1.5) repo_size = api.get_repo_size (repo.id) assert repo_size == 0 #test upload the file to subdir whose parent is test when file already exists and replace is set. params = {'ret-json':'1'} m = MultipartEncoder( fields={ 'parent_dir': '/test', 'relative_path': 'subdir', 'replace': '1', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, params = params, data = m, headers = {'Content-Type': m.content_type}) assert response.status_code == 403 #test upload the file to subdir when file already exists and replace is set. params = {'ret-json':'1'} m = MultipartEncoder( fields={ 'parent_dir': '/', 'relative_path': 'subdir', 'replace': '1', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, params = params, data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, True, True) time.sleep (1.5) repo_size = api.get_repo_size (repo.id) assert repo_size == 0 #unset test upload the file to subdir whose parent is test dir when file already exists and replace is unset. 
params = {'ret-json':'1'} m = MultipartEncoder( fields={ 'parent_dir': '/test', 'relative_path': 'subdir', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, params = params, data = m, headers = {'Content-Type': m.content_type}) assert response.status_code == 403 #unset test upload the file to subdir when file already exists and replace is unset. params = {'ret-json':'1'} m = MultipartEncoder( fields={ 'parent_dir': '/', 'relative_path': 'subdir', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, params = params, data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, False, True) time.sleep (1.5) repo_size = api.get_repo_size (repo.id) assert repo_size == 0 #test resumable upload file to test parent_dir = '/test' headers = {'Content-Range':'bytes 0-{}/{}'.format(str(len(chunked_part1_content) - 1), str(total_size)), 'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)} response = request_resumable_upload(chunked_part1_path, headers, upload_url_base, parent_dir, False) assert_resumable_upload_response(response, repo.id, resumable_test_file_name, False) time.sleep (1.5) repo_size = api.get_repo_size (repo.id) assert repo_size == 0 headers = {'Content-Range':'bytes {}-{}/{}'.format(str(len(chunked_part1_content)), str(total_size - 1), str(total_size)), 'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)} response = request_resumable_upload(chunked_part2_path, headers, upload_url_base, parent_dir, False) assert response.status_code == 403 #test resumable upload file to root dir parent_dir = '/' headers = {'Content-Range':'bytes 0-{}/{}'.format(str(len(chunked_part1_content) - 1), str(total_size)), 'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)} response = request_resumable_upload(chunked_part1_path,headers, 
upload_url_base,parent_dir, False) assert_resumable_upload_response(response, repo.id, resumable_file_name, False) repo_size = api.get_repo_size (repo.id) assert repo_size == 0 headers = {'Content-Range':'bytes {}-{}/{}'.format(str(len(chunked_part1_content)), str(total_size - 1), str(total_size)), 'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)} response = request_resumable_upload(chunked_part2_path, headers, upload_url_base, parent_dir, False) assert_resumable_upload_response(response, repo.id, resumable_file_name, True) time.sleep (1.5) repo_size = api.get_repo_size (repo.id) assert repo_size == total_size #test update file. write_file(file_path, file_content) token = api.get_fileserver_access_token(repo.id, obj_id, 'update', USER, False) update_url_base = 'http://127.0.0.1:8082/update-api/' + token m = MultipartEncoder( fields={ 'target_file': '/' + file_name, 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(update_url_base, data = m, headers = {'Content-Type': m.content_type}) assert_update_response(response, False) time.sleep (1.5) repo_size = api.get_repo_size (repo.id) assert repo_size == total_size + file_size time.sleep(1) del_repo_files(repo.id) del_local_files() def test_ajax_mtime(repo): create_test_file() obj_id = '{"parent_dir":"/"}' mtime = '2023-09-27T18:18:25+08:00' token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False) upload_url_base = 'http://127.0.0.1:8082/upload-aj/'+ token m = MultipartEncoder( fields={ 'parent_dir': '/', 'last_modify': mtime, 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, False, False) dent = api.get_dirent_by_path(repo.id, '/' + file_name) assert dent.mtime == 1695809905 def test_api_mtime(repo): create_test_file() params = {'ret-json':'1'} obj_id = 
'{"parent_dir":"/"}' mtime = '2023-09-27T18:18:25+08:00' token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False) upload_url_base = 'http://127.0.0.1:8082/upload-api/' + token m = MultipartEncoder( fields={ 'parent_dir': '/', 'last_modify': mtime, 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, params = params, data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response, False, False) dent = api.get_dirent_by_path(repo.id, '/' + file_name) assert dent.mtime == 1695809905 ================================================ FILE: tests/test_file_operation/test_upload_large_files.py ================================================ import pytest import requests import os import hashlib from tests.config import USER from seaserv import seafile_api as api from requests_toolbelt import MultipartEncoder file_name = 'file.txt' file_name_not_replaced = 'file (1).txt' file_path = os.getcwd() + '/' + file_name file_size = 400*1024*1024 download_file_name = 'download_file.txt' download_file_path = os.getcwd() + '/' + download_file_name resumable_download_file_name = 'resumable_download_file.txt' resumable_download_file_path = os.getcwd() + '/' + resumable_download_file_name resumable_file_name = 'resumable.txt' chunked_part1_name = 'part1.txt' chunked_part2_name = 'part2.txt' chunked_part1_path = os.getcwd() + '/' + chunked_part1_name chunked_part2_path = os.getcwd() + '/' + chunked_part2_name chunked_part1_size = 200*1024*1024 chunked_part2_size = 200*1024*1024 total_size = chunked_part1_size + chunked_part2_size #File_id is not used when upload files, but #the argument obj_id of get_fileserver_access_token shouldn't be NULL. 
file_id = '0000000000000000000000000000000000000000' def create_test_file(): fp = open(file_path, 'wb') fp.write(os.urandom(file_size)) fp.close() fp = open(chunked_part1_path, 'wb') fp.write(os.urandom(chunked_part1_size)) fp.close() fp = open(chunked_part2_path, 'wb') fp.write(os.urandom(chunked_part2_size)) fp.close() def create_test_dir(repo, dir_name): parent_dir = '/' api.post_dir(repo.id,parent_dir,dir_name,USER) def assert_upload_response(response): assert response.status_code == 200 response_json = response.json() assert response_json[0]['size'] == file_size assert response_json[0]['id'] != file_id assert response_json[0]['name'] == file_name def assert_resumable_upload_response(response, repo_id, file_name, upload_complete): assert response.status_code == 200 if not upload_complete: assert response.text == '{"success": true}' offset = api.get_upload_tmp_file_offset(repo_id, '/' + file_name) assert offset == chunked_part1_size else: response_json = response.json() assert response_json[0]['size'] == total_size new_file_id = response_json[0]['id'] assert len(new_file_id) == 40 and new_file_id != file_id assert response_json[0]['name'] == resumable_file_name def request_resumable_upload(filepath, headers,upload_url_base,parent_dir,is_ajax): m = MultipartEncoder( fields={ 'parent_dir': parent_dir, 'file': (resumable_file_name, open(filepath, 'rb'), 'application/octet-stream') }) params = {'ret-json':'1'} headers["Content-type"] = m.content_type if is_ajax: response = requests.post(upload_url_base, headers = headers, data = m) else: response = requests.post(upload_url_base, headers = headers, data = m, params = params) return response def write_file(file_path, file_content): fp = open(file_path, 'w') fp.write(file_content) fp.close() def del_repo_files(repo_id): api.del_file(repo_id, '/', '[\"'+file_name+'\"]', USER) api.del_file(repo_id, '/', '[\"'+file_name_not_replaced+'\"]', USER) api.del_file(repo_id, '/', '[\"subdir\"]', USER) api.del_file(repo_id, '/', 
'[\"'+resumable_file_name+'\"]', USER) def del_local_files(): os.remove(file_path) os.remove(download_file_path) os.remove(chunked_part1_path) os.remove(chunked_part2_path) os.remove(resumable_download_file_path) def sha1sum(filepath): with open(filepath, 'rb') as f: return hashlib.sha1(f.read()).hexdigest() def chunked_sha1sum(chunked_part1, chunked_part2): f1 = open(chunked_part1, 'rb') f2 = open(chunked_part2, 'rb') data = f1.read()+f2.read() sha1 = hashlib.sha1(data).hexdigest() f1.close() f2.close() return sha1 def test_large_files_ajax(repo): create_test_file() create_test_dir(repo,'test') obj_id = '{"parent_dir":"/"}' # upload large file by upload-aj file_id1 = sha1sum(file_path) token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False) upload_url_base = 'http://127.0.0.1:8082/upload-aj/'+ token m = MultipartEncoder( fields={ 'parent_dir': '/', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response) # download file and check sha1 obj_id = api.get_file_id_by_path(repo.id, '/' + file_name) assert obj_id != None token = api.get_fileserver_access_token (repo.id, obj_id, 'download', USER, False) download_url = 'http://127.0.0.1:8082/files/' + token + '/' + file_name response = requests.get(download_url) assert response.status_code == 200 with open(download_file_path, 'wb') as fp: fp.write(response.content) file_id2 = sha1sum(download_file_path) assert file_id1 == file_id2 file_id1 = chunked_sha1sum(chunked_part1_path, chunked_part2_path) parent_dir = '/' headers = {'Content-Range':'bytes 0-{}/{}'.format(str(chunked_part1_size - 1), str(total_size)), 'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)} response = request_resumable_upload(chunked_part1_path, headers, upload_url_base, parent_dir, True) assert_resumable_upload_response(response, repo.id, 
resumable_file_name, False) headers = {'Content-Range':'bytes {}-{}/{}'.format(str(chunked_part1_size), str(total_size - 1), str(total_size)), 'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)} response = request_resumable_upload(chunked_part2_path, headers, upload_url_base, parent_dir, True) assert_resumable_upload_response(response, repo.id, resumable_file_name, True) # download file and check sha1 obj_id = api.get_file_id_by_path(repo.id, '/' + resumable_file_name) assert obj_id != None token = api.get_fileserver_access_token (repo.id, obj_id, 'download', USER, False) download_url = 'http://127.0.0.1:8082/files/' + token + '/' + resumable_file_name response = requests.get(download_url) assert response.status_code == 200 with open(resumable_download_file_path, 'wb') as fp: fp.write(response.content) file_id2 = sha1sum(resumable_download_file_path) assert file_id1 == file_id2 del_repo_files(repo.id) del_local_files() def test_large_files_api(repo): create_test_file() params = {'ret-json':'1'} obj_id = '{"parent_dir":"/"}' create_test_dir(repo,'test') #test upload file to root dir. 
file_id1 = sha1sum(file_path) params = {'ret-json':'1'} token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False) upload_url_base = 'http://127.0.0.1:8082/upload-api/' + token m = MultipartEncoder( fields={ 'parent_dir': '/', 'file': (file_name, open(file_path, 'rb'), 'application/octet-stream') }) response = requests.post(upload_url_base, params = params, data = m, headers = {'Content-Type': m.content_type}) assert_upload_response(response) # download file and check sha1 obj_id = api.get_file_id_by_path(repo.id, '/' + file_name) assert obj_id != None token = api.get_fileserver_access_token (repo.id, obj_id, 'download', USER, False) download_url = 'http://127.0.0.1:8082/files/' + token + '/' + file_name response = requests.get(download_url) assert response.status_code == 200 with open(download_file_path, 'wb') as fp: fp.write(response.content) file_id2 = sha1sum(download_file_path) assert file_id1 == file_id2 #test resumable upload file to test file_id1 = chunked_sha1sum(chunked_part1_path, chunked_part2_path) parent_dir = '/' headers = {'Content-Range':'bytes 0-{}/{}'.format(str(chunked_part1_size - 1), str(total_size)), 'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)} response = request_resumable_upload(chunked_part1_path, headers, upload_url_base, parent_dir, False) assert_resumable_upload_response(response, repo.id, resumable_file_name, False) headers = {'Content-Range':'bytes {}-{}/{}'.format(str(chunked_part1_size), str(total_size - 1), str(total_size)), 'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)} response = request_resumable_upload(chunked_part2_path, headers, upload_url_base, parent_dir, False) assert_resumable_upload_response(response, repo.id, resumable_file_name, True) obj_id = api.get_file_id_by_path(repo.id, '/' + resumable_file_name) assert obj_id != None token = api.get_fileserver_access_token (repo.id, obj_id, 'download', USER, False) download_url = 
'http://127.0.0.1:8082/files/' + token + '/' + resumable_file_name response = requests.get(download_url) assert response.status_code == 200 with open(resumable_download_file_path, 'wb') as fp: fp.write(response.content) file_id2 = sha1sum(resumable_download_file_path) assert file_id1 == file_id2 del_repo_files(repo.id) del_local_files() ================================================ FILE: tests/test_file_operation/test_zip_download.py ================================================ import pytest import requests import os import time import zipfile import json from tests.config import USER from seaserv import seafile_api as api file1_name = 'file1.txt' file2_name = 'file2.txt' file1_path = os.getcwd() + '/' + file1_name file2_path = os.getcwd() + '/' + file2_name file1_content ='File1 content' file2_content ='File2 content' download_dir_path = os.getcwd() + '/download_dir' def create_test_files(): os.mkdir(download_dir_path) with open(file1_path, 'w') as fp1: fp1.write(file1_content) with open(file2_path, 'w') as fp2: fp2.write(file2_content) def remove_test_files(): os.rmdir(download_dir_path) os.remove(file1_path) os.remove(file2_path) def test_zip_download(): create_test_files() t_repo_id = api.create_repo('test_zip_download', '', USER) base_url = 'http://127.0.0.1:8082/' #test zip download dir dir_name = 'dir' api.post_dir(t_repo_id, '/', dir_name, USER) api.post_file(t_repo_id, file1_path, '/dir', file1_name, USER) api.post_file(t_repo_id, file2_path, '/dir', file2_name, USER) dir_id = api.get_dir_id_by_path(t_repo_id, '/dir') obj_id = {'obj_id': dir_id, 'dir_name': dir_name, 'is_windows': 0} obj_id_json_str = json.dumps(obj_id) token = api.get_fileserver_access_token(t_repo_id, obj_id_json_str, 'download-dir', USER) time.sleep(1) download_url = base_url + 'zip/' + token response = requests.get(download_url) assert response.status_code == 200 download_zipfile_path = download_dir_path + '/dir.zip' with open(download_zipfile_path, 'wb') as fp: 
fp.write(response.content) zipFile = zipfile.ZipFile(download_zipfile_path) for name in zipFile.namelist(): zipFile.extract(name, download_dir_path) zipFile.close() assert os.path.exists(download_dir_path + '/dir.zip') assert os.path.exists(download_dir_path + '/dir') assert os.path.exists(download_dir_path + '/dir' + '/file1.txt') assert os.path.exists(download_dir_path + '/dir' + '/file2.txt') with open(download_dir_path + '/dir' + '/file1.txt', 'r') as fp1: line = fp1.read() assert line == file1_content with open(download_dir_path + '/dir' + '/file2.txt', 'r') as fp2: line = fp2.read() assert line == file2_content os.remove(download_dir_path + '/dir' + '/file1.txt') os.remove(download_dir_path + '/dir' + '/file2.txt') os.rmdir(download_dir_path + '/dir') os.remove(download_dir_path + '/dir.zip') #test zip download empty dir empty_dir_name = 'empty_dir' api.post_dir(t_repo_id, '/', empty_dir_name, USER) dir_id = api.get_dir_id_by_path(t_repo_id, '/empty_dir') obj_id = {'obj_id': dir_id, 'dir_name': empty_dir_name, 'is_windows': 0} obj_id_json_str = json.dumps(obj_id) token = api.get_fileserver_access_token(t_repo_id, obj_id_json_str, 'download-dir', USER) time.sleep(1) download_url = base_url + 'zip/' + token response = requests.get(download_url) assert response.status_code == 200 download_zipfile_path = download_dir_path + '/empty_dir.zip' with open(download_zipfile_path, 'wb') as fp: fp.write(response.content) zipFile = zipfile.ZipFile(download_zipfile_path) for name in zipFile.namelist(): zipFile.extract(name, download_dir_path) zipFile.close() assert os.path.exists(download_dir_path + '/empty_dir') assert not os.listdir(download_dir_path + '/empty_dir') os.rmdir(download_dir_path + '/empty_dir') os.remove(download_dir_path + '/empty_dir.zip') #test zip download mutliple files api.post_file(t_repo_id, file1_path, '/', file1_name, USER) api.post_file(t_repo_id, file2_path, '/', file2_name, USER) obj_id = {'parent_dir': '/', 'file_list': [file1_name, 
file2_name], 'is_windows' : 0} obj_id_json_str = json.dumps(obj_id) token = api.get_fileserver_access_token(t_repo_id, obj_id_json_str, 'download-multi', USER) time.sleep(1) download_url = base_url + 'zip/' + token response = requests.get(download_url) assert response.status_code == 200 download_zipfile_path = download_dir_path + '/multi_files.zip' with open(download_zipfile_path, 'wb') as fp: fp.write(response.content) zipFile = zipfile.ZipFile(download_zipfile_path) for name in zipFile.namelist(): zipFile.extract(name, download_dir_path) zipFile.close() assert os.path.exists(download_dir_path + '/file1.txt') assert os.path.exists(download_dir_path + '/file2.txt') with open(download_dir_path + '/file1.txt', 'r') as fp1: line = fp1.read() assert line == file1_content with open(download_dir_path + '/file2.txt', 'r') as fp2: line = fp2.read() assert line == file2_content os.remove(download_dir_path + '/file1.txt') os.remove(download_dir_path + '/file2.txt') os.remove(download_dir_path + '/multi_files.zip') #test zip download mutliple files in multi-level api.post_file(t_repo_id, file2_path, '/dir', file2_name, USER) obj_id = {'parent_dir': '/', 'file_list': [file1_name, 'dir/'+file2_name], 'is_windows' : 0} obj_id_json_str = json.dumps(obj_id) token = api.get_fileserver_access_token(t_repo_id, obj_id_json_str, 'download-multi', USER) time.sleep(1) download_url = base_url + 'zip/' + token response = requests.get(download_url) assert response.status_code == 200 download_zipfile_path = download_dir_path + '/multi_files.zip' with open(download_zipfile_path, 'wb') as fp: fp.write(response.content) zipFile = zipfile.ZipFile(download_zipfile_path) for name in zipFile.namelist(): zipFile.extract(name, download_dir_path) zipFile.close() assert os.path.exists(download_dir_path + '/file1.txt') assert os.path.exists(download_dir_path + '/file2.txt') with open(download_dir_path + '/file1.txt', 'r') as fp1: line = fp1.read() assert line == file1_content with open(download_dir_path 
+ '/file2.txt', 'r') as fp2: line = fp2.read() assert line == file2_content os.remove(download_dir_path + '/file1.txt') os.remove(download_dir_path + '/file2.txt') os.remove(download_dir_path + '/multi_files.zip') remove_test_files() api.remove_repo(t_repo_id) ================================================ FILE: tests/test_file_property_and_dir_listing/test_file_property_and_dir_listing.py ================================================ import pytest import os import time from tests.config import USER from seaserv import seafile_api as api file_name = 'test.txt' dir_name = 'test_dir' file_content = 'test file content' file_path = os.getcwd() + '/' + file_name def create_the_file (): fp = open(file_path, 'w') fp.write(file_content) fp.close() def test_file_property_and_dir_listing (): t_repo_version = 1 t_repo_id = api.create_repo('test_file_property_and_dir_listing', '', USER, passwd=None) create_the_file() api.post_file(t_repo_id, file_path, '/', file_name, USER) api.post_dir(t_repo_id, '/', dir_name, USER) api.post_file(t_repo_id, file_path, '/' + dir_name, file_name, USER) #test is_valid_filename t_valid_file_name = 'valid_filename' t_invalid_file_name = '/invalid_filename' assert api.is_valid_filename(t_repo_id, t_valid_file_name) assert api.is_valid_filename(t_repo_id, t_invalid_file_name) == 0 #test get_file_id_by_path t_file_id = api.get_file_id_by_path(t_repo_id, '/test.txt') assert t_file_id #test get_dir_id_by_path t_dir_id = api.get_dir_id_by_path(t_repo_id, '/test_dir') assert t_dir_id #test get_file_size t_file_size = len(file_content) assert t_file_size == api.get_file_size(t_repo_id, t_repo_version, t_file_id) #test get_dir_size t_dir_size = len(file_content) assert t_dir_size == api.get_dir_size(t_repo_id, t_repo_version, t_dir_id) #test get_file_count_info_by_path t_file_count_info = api.get_file_count_info_by_path(t_repo_id , '/') assert t_file_count_info.file_count == 2 assert t_file_count_info.dir_count == 1 assert t_file_count_info.size == 
t_file_size + t_dir_size #test get_file_id_by_commit_and_path t_file_id_tmp = t_file_id t_repo = api.get_repo(t_repo_id) assert t_repo t_commit_id = t_repo.head_cmmt_id t_file_id = api.get_file_id_by_commit_and_path(t_repo_id, t_commit_id, '/test.txt') assert t_file_id == t_file_id_tmp #test get_dirent_by_path std_file_mode = 0o100000 | 0o644 t_dirent_obj = api.get_dirent_by_path(t_repo_id, '/test.txt') assert t_dirent_obj assert t_dirent_obj.obj_id == t_file_id assert t_dirent_obj.obj_name == 'test.txt' assert t_dirent_obj.mode == std_file_mode assert t_dirent_obj.version == t_repo_version assert t_dirent_obj.size == t_file_size assert t_dirent_obj.modifier == USER #test list_file_by_file_id t_block_list = api.list_file_by_file_id(t_repo_id, t_file_id) assert t_block_list #test list_blocks_by_file_id t_block_list = api.list_blocks_by_file_id(t_repo_id, t_file_id) assert t_block_list #test list_dir_by_dir_id t_dir_list = api.list_dir_by_dir_id(t_repo_id, t_dir_id) assert len(t_dir_list) == 1 #test list_dir_by_path t_dir_list = api.list_dir_by_path(t_repo_id, '/test_dir') assert len(t_dir_list) == 1 #test get_dir_id_by_commit_and_path t_dir_id = api.get_dir_id_by_commit_and_path(t_repo_id, t_commit_id, '/test_dir') assert t_dir_id #test list_dir_by_commit_and_path t_dir_list = api.list_dir_by_commit_and_path(t_repo_id, t_commit_id, '/test_dir') assert len(t_dir_list) == 1 #test list_dir_with_perm t_dir_list = api.list_dir_with_perm(t_repo_id, '/test_dir', t_dir_id, USER) assert len(t_dir_list) == 1 #test mkdir_with_parent api.mkdir_with_parents (t_repo_id, '/test_dir', 'test_subdir', USER) t_dir_id = api.get_dir_id_by_path(t_repo_id, '/test_dir/test_subdir') assert t_dir_id #test get_total_storage t_total_size = api.get_total_storage() t_repo_size = api.get_repo_size(t_repo_id) assert t_total_size == t_repo_size #get_total_file_number time.sleep(1) assert api.get_total_file_number() == 2 api.remove_repo(t_repo_id) ================================================ 
FILE: tests/test_gc/test_gc.py ================================================ import pytest import requests import os import time from subprocess import run from tests.config import USER, USER2 from seaserv import seafile_api as api from concurrent.futures import ThreadPoolExecutor from requests_toolbelt import MultipartEncoder file_name = 'file.txt' first_name = 'first.txt' first_path = os.getcwd() + '/' + first_name first_content = 'Fist file content.\r\n' second_name = 'second.txt' second_content = 'Second file content.\r\n' second_path = os.getcwd() + '/' + second_name third_name = 'third.txt' third_path = os.getcwd() + '/' + third_name third_content = 'Third file content.\r\n' def create_test_file(): fp = open(first_path, 'w') fp.write(first_content) fp.close() fp = open(second_path, 'w') fp.write(second_content) fp.close() fp = open(third_path, 'w') fp.write(third_content) fp.close() def del_local_files(): os.remove(first_path) os.remove(second_path) os.remove(third_path) def create_test_dir(repo, dir_name): parent_dir = '/' api.post_dir(repo.id,parent_dir,dir_name,USER) def run_gc(repo_id, rm_fs, check): cmdStr = 'seafserv-gc --verbose -F /tmp/seafile-tests/conf -d /tmp/seafile-tests/seafile-data %s %s %s'%(rm_fs, check, repo_id) cmd=cmdStr.split(' ') ret = run (cmd) assert ret.returncode == 0 @pytest.mark.parametrize('rm_fs', ['', '--rm-fs']) def test_gc_full_history(repo, rm_fs): create_test_file() api.set_repo_valid_since (repo.id, -1) create_test_dir(repo,'subdir') v_repo_id = api.share_subdir_to_user(repo.id, '/subdir', USER, USER2, 'rw') assert v_repo_id is not None assert api.post_file(repo.id, first_path, '/subdir', file_name, USER) == 0 assert api.post_empty_file(repo.id, '/', file_name, USER) == 0 t_repo = api.get_repo(repo.id) assert api.put_file(repo.id, first_path, '/', file_name, USER, t_repo.head_cmmt_id) t_repo = api.get_repo(repo.id) assert api.put_file(repo.id, second_path, '/', file_name, USER, t_repo.head_cmmt_id) t_repo = 
api.get_repo(repo.id) assert api.put_file(repo.id, third_path, '/', file_name, USER, t_repo.head_cmmit_id) time.sleep(1) api.del_file(repo.id, '/', '[\"'+file_name+'\"]', USER) run_gc(repo.id, rm_fs, '') run_gc(repo.id, '', '--check') del_local_files() @pytest.mark.parametrize('rm_fs', ['', '--rm-fs']) def test_gc_no_history(repo, rm_fs): create_test_file() api.set_repo_valid_since (repo.id, 0) create_test_dir(repo,'subdir') v_repo_id = api.share_subdir_to_user(repo.id, '/subdir', USER, USER2, 'rw') assert v_repo_id is not None assert api.post_file(repo.id, first_path, '/subdir', file_name, USER) == 0 assert api.post_empty_file(repo.id, '/', file_name, USER) == 0 t_repo = api.get_repo(repo.id) assert api.put_file(repo.id, first_path, '/', file_name, USER, t_repo.head_cmmt_id) t_repo = api.get_repo(repo.id) assert api.put_file(repo.id, second_path, '/', file_name, USER, t_repo.head_cmmt_id) t_repo = api.get_repo(repo.id) time.sleep(1) assert api.put_file(repo.id, third_path, '/', file_name, USER, t_repo.head_cmmt_id) time.sleep(1) api.del_file(repo.id, '/', '[\"'+file_name+'\"]', USER) run_gc(repo.id, rm_fs, '') api.set_repo_valid_since (repo.id, 0) run_gc(repo.id, '', '--check') del_local_files() @pytest.mark.parametrize('rm_fs', ['', '--rm-fs']) def test_gc_partial_history(repo, rm_fs): create_test_file() create_test_dir(repo,'subdir') v_repo_id = api.share_subdir_to_user(repo.id, '/subdir', USER, USER2, 'rw') assert v_repo_id is not None assert api.post_file(repo.id, first_path, '/subdir', file_name, USER) == 0 assert api.post_empty_file(repo.id, '/', file_name, USER) == 0 t_repo = api.get_repo(repo.id) time.sleep(1) assert api.put_file(repo.id, first_path, '/', file_name, USER, t_repo.head_cmmt_id) t_repo = api.get_repo(repo.id) time.sleep(1) assert api.put_file(repo.id, second_path, '/', file_name, USER, t_repo.head_cmmt_id) t_repo = api.get_repo(repo.id) t_commit = api.get_commit(t_repo.id, t_repo.version, t_repo.head_cmmt_id) api.set_repo_valid_since 
(repo.id, t_commit.ctime) time.sleep(1) assert api.put_file(repo.id, third_path, '/', file_name, USER, t_repo.head_cmmt_id) api.del_file(repo.id, '/', '[\"'+file_name+'\"]', USER) run_gc(repo.id, rm_fs, '') run_gc(repo.id, '', '--check') del_local_files() ================================================ FILE: tests/test_get_repo_list/test_get_repo_list.py ================================================ import pytest from seaserv import seafile_api as api from tests.config import USER from tests.utils import randstring attr_to_assert = ['id', 'name', 'version', 'last_modify', 'size', 'last_modifier', 'head_cmmt_id', 'repo_id', 'repo_name', 'last_modified', 'encrypted', 'is_virtual', 'origin_repo_id', 'origin_repo_name', 'origin_path', 'store_id' ,'share_type', 'permission', 'user', 'group_id'] def assert_by_attr_name (repo, repo_to_test, attr): if (attr == 'id'): assert getattr(repo_to_test, attr) == repo.id elif (attr == 'name'): assert getattr(repo_to_test, attr) == repo.name elif (attr == 'size'): assert getattr(repo_to_test, attr) == repo.size elif (attr == 'last_modifier'): assert getattr(repo_to_test, attr) == repo.last_modifier elif (attr == 'head_cmmt_id'): assert getattr(repo_to_test, attr) == repo.head_cmmt_id elif (attr == 'repo_id'): assert getattr(repo_to_test, attr) == repo.id elif (attr == 'repo_name'): assert getattr(repo_to_test, attr) == repo.name elif (attr == 'last_modified'): assert getattr(repo_to_test, attr) == repo.last_modified elif (attr == 'encrypted'): assert getattr(repo_to_test, attr) == repo.encrypted elif (attr == 'is_virtual'): assert getattr(repo_to_test, attr) == repo.is_virtual elif (attr == 'origin_repo_id'): assert getattr(repo_to_test, attr) == repo.origin_repo_id elif (attr == 'origin_repo_name'): assert getattr(repo_to_test, attr) != None elif (attr == 'origin_path'): assert getattr(repo_to_test, attr) == repo.origin_path elif (attr == 'store_id'): assert getattr(repo_to_test, attr) == repo.store_id elif (attr == 
'share_type'): assert getattr(repo_to_test, attr) != None elif (attr == 'permission'): assert getattr(repo_to_test, attr) == 'rw' elif (attr == 'group_id'): assert getattr(repo_to_test,attr) != 0 def assert_public_repos_attr(repo, repo_to_test): for attr in attr_to_assert: assert hasattr(repo_to_test, attr) == True assert hasattr(repo_to_test, 'is_virtual') is_virtual = getattr(repo_to_test, 'is_virtual') if (is_virtual == False): if (attr == 'origin_repo_id' or attr == 'origin_path'): continue if (attr == 'origin_repo_name'): continue if (attr == 'group_id'): continue assert_by_attr_name(repo, repo_to_test, attr) def assert_group_repos_attr(repo, repo_to_test): for attr in attr_to_assert: assert hasattr(repo_to_test, attr) == True assert hasattr(repo_to_test, 'is_virtual') is_virtual = getattr(repo_to_test, 'is_virtual') if (is_virtual == False): if (attr == 'origin_repo_id' or attr == 'origin_repo_name' or attr == 'origin_path'): continue assert_by_attr_name(repo, repo_to_test, attr) def test_get_group_repos(repo, group): repo = api.get_repo(repo.id) api.group_share_repo(repo.id, group.id, USER, 'rw') repos = api.get_repos_by_group(group.id) assert_group_repos_attr(repo, repos[0]) repos = api.get_group_repos_by_owner(USER) assert_group_repos_attr(repo, repos[0]) v_repo_id = api.share_subdir_to_group(repo.id, '/dir1', USER, group.id, 'rw') v_repo = api.get_repo(v_repo_id) v_repo_to_test = api.get_group_shared_repo_by_path(repo.id, '/dir1', group.id) assert_group_repos_attr(v_repo, v_repo_to_test) api.unshare_subdir_for_group(repo.id, '/dir1', USER, group.id) repos = api.get_group_repos_by_user(USER) assert_group_repos_attr(repo, repos[0]) assert api.group_unshare_repo(repo.id, group.id, USER) == 0 def test_get_inner_pub_repos(repo): repo = api.get_repo(repo.id) api.add_inner_pub_repo(repo.id, 'rw') repos = api.get_inner_pub_repo_list() assert_public_repos_attr(repo, repos[0]) repos = api.list_inner_pub_repos_by_owner(USER) assert_public_repos_attr(repo, repos[0]) 
assert api.remove_inner_pub_repo(repo.id) == 0 ================================================ FILE: tests/test_group/test_groups.py ================================================ import pytest from seaserv import seafile_api as api from seaserv import ccnet_api from tests.config import USER, USER2 from tests.utils import randstring def test_multi_tier_groups(repo): id1 = ccnet_api.create_group('group1', USER, parent_group_id=-1) id2 = ccnet_api.create_group('group2', USER2, parent_group_id = id1) id3 = ccnet_api.create_group('group3', USER, parent_group_id = id1) id4 = ccnet_api.create_group('group4', USER2, parent_group_id = id3) id5 = ccnet_api.create_group('group5', USER2, parent_group_id = 0) assert id1 != -1 and id2 != -1 and id3 != -1 and id4 != -1 group1 = ccnet_api.get_group(id1) group2 = ccnet_api.get_group(id2) group3 = ccnet_api.get_group(id3) group4 = ccnet_api.get_group(id4) assert group1.parent_group_id == -1 assert group2.parent_group_id == id1 assert group3.parent_group_id == id1 assert group4.parent_group_id == id3 members = ccnet_api.search_group_members (id1, 'randgroup{}'.format(randstring(6))) assert len(members) == 0 members = ccnet_api.search_group_members (id1, USER) assert len(members) == 1 assert members[0].user_name == USER ances_order = [id5, id4, id3, id2, id1] user2_groups_with_ancestors = ccnet_api.get_groups (USER2, return_ancestors = True) assert len(user2_groups_with_ancestors) == 5 i = 0 for g in user2_groups_with_ancestors: assert g.id == ances_order[i] i = i + 1 order = [id5, id4, id2] i = 0 user2_groups = ccnet_api.get_groups (USER2) assert len(user2_groups) == 3 for g in user2_groups: assert g.id == order[i] i = i + 1 top_groups = ccnet_api.get_top_groups(True) assert len(top_groups) == 1 for g in top_groups: assert g.parent_group_id == -1 child_order = [id2, id3] i = 0 id1_children = ccnet_api.get_child_groups(id1) assert len(id1_children) == 2 for g in id1_children: assert g.id == child_order[i] i = i + 1 group4_order = 
[id1, id3, id4] i = 0 group4_ancestors = ccnet_api.get_ancestor_groups(id4) assert len(group4_ancestors) == 3 for g in group4_ancestors: assert g.id == group4_order[i] i = i + 1 rm5 = ccnet_api.remove_group(id5) rm4 = ccnet_api.remove_group(id4) rm3 = ccnet_api.remove_group(id3) rm2 = ccnet_api.remove_group(id2) rm1 = ccnet_api.remove_group(id1) assert rm5 == 0 and rm4 == 0 and rm3 == 0 and rm2 == 0 and rm1 == 0 ================================================ FILE: tests/test_password/test_password.py ================================================ import pytest from tests.config import USER from seaserv import seafile_api as api @pytest.mark.parametrize('rpc, enc_version', [('create_repo', 2), ('create_repo', 3), ('create_repo', 4), ('create_enc_repo', 2), ('create_enc_repo', 3), ('create_enc_repo', 4)]) def test_encrypted_repo(rpc, enc_version): test_repo_name = 'test_enc_repo' test_repo_desc = 'test_enc_repo' test_repo_passwd = 'test_enc_repo' if rpc == 'create_repo': repo_id = api.create_repo(test_repo_name, test_repo_desc, USER, test_repo_passwd, enc_version) assert repo_id else: if enc_version == 2: repo_id = 'd17bf8ca-3019-40ee-8fdb-0258c89fb762' elif enc_version == 3: repo_id = 'd17bf8ca-3019-40ee-8fdb-0258c89fb763' else: repo_id = 'd17bf8ca-3019-40ee-8fdb-0258c89fb764' enc_info = api.generate_magic_and_random_key(enc_version, repo_id, test_repo_passwd) assert enc_info ret_repo_id = api.create_enc_repo(repo_id, test_repo_name, test_repo_desc, USER, enc_info.magic, enc_info.random_key, enc_info.salt, enc_version) assert ret_repo_id == repo_id repo = api.get_repo(repo_id) assert repo assert repo.enc_version == enc_version assert len(repo.magic) == 64 assert len(repo.random_key) == 96 if enc_version == 3 or enc_version == 4: assert len(repo.salt) == 64 new_passwd = 'new password' assert api.set_passwd(repo.id, USER, test_repo_passwd) == 0 assert api.get_decrypt_key(repo.id, USER) api.change_repo_passwd(repo.repo_id, test_repo_passwd, new_passwd, USER) == 0 
assert api.set_passwd(repo.id, USER, new_passwd) == 0 assert api.is_password_set(repo.id, USER) assert api.unset_passwd(repo.id, USER) == 0 assert api.is_password_set(repo.id, USER) == 0 api.remove_repo(repo_id) @pytest.mark.parametrize('rpc, enc_version, algo, params', [('create_repo', 2, 'pbkdf2_sha256', '1000'), ('create_repo', 3, 'pbkdf2_sha256', '1000'), ('create_repo', 4, 'pbkdf2_sha256', '1000'), ('create_repo', 2, 'argon2id', '2,102400,8'), ('create_repo', 3, 'argon2id', '2,102400,8'), ('create_repo', 4, 'argon2id', '2,102400,8')]) def test_pwd_hash(rpc, enc_version, algo, params): test_repo_name = 'test_enc_repo' test_repo_desc = 'test_enc_repo' test_repo_passwd = 'test_enc_repo' repo_id = api.create_repo(test_repo_name, test_repo_desc, USER, test_repo_passwd, enc_version, pwd_hash_algo=algo, pwd_hash_params=params) assert repo_id repo = api.get_repo(repo_id) assert repo assert repo.enc_version == enc_version assert len(repo.pwd_hash) == 64 assert len(repo.random_key) == 96 if enc_version > 2: assert len(repo.salt) == 64 new_passwd = 'new password' assert api.set_passwd(repo.id, USER, test_repo_passwd) == 0 assert api.get_decrypt_key(repo.id, USER) api.change_repo_passwd(repo.repo_id, test_repo_passwd, new_passwd, USER) == 0 assert api.set_passwd(repo.id, USER, new_passwd) == 0 assert api.is_password_set(repo.id, USER) assert api.unset_passwd(repo.id, USER) == 0 assert api.is_password_set(repo.id, USER) == 0 api.remove_repo(repo_id) @pytest.mark.parametrize('enc_version, algo, params', [(2, 'pbkdf2_sha256', '1000'), (3, 'pbkdf2_sha256', '1000'), ( 4, 'pbkdf2_sha256', '1000'), (2, 'argon2id', '2,102400,8'), (3, 'argon2id', '2,102400,8'), (4, 'argon2id', '2,102400,8')]) def test_upgrade_pwd_hash(enc_version, algo, params): test_repo_name = 'test_enc_repo' test_repo_desc = 'test_enc_repo' test_repo_passwd = 'test_enc_repo' repo_id = api.create_repo(test_repo_name, test_repo_desc, USER, test_repo_passwd, enc_version) assert repo_id repo = api.get_repo(repo_id) 
assert repo assert repo.enc_version == enc_version assert len(repo.random_key) == 96 if enc_version > 2: assert len(repo.salt) == 64 api.upgrade_repo_pwd_hash_algorithm (repo.repo_id, USER, test_repo_passwd, algo, params) == 0 repo = api.get_repo(repo_id) assert repo.pwd_hash_algo == algo; assert repo.pwd_hash_params == params; assert repo.pwd_hash assert api.set_passwd(repo.id, USER, test_repo_passwd) == 0 assert api.get_decrypt_key(repo.id, USER) assert api.is_password_set(repo.id, USER) assert api.unset_passwd(repo.id, USER) == 0 assert api.is_password_set(repo.id, USER) == 0 api.remove_repo(repo_id) ================================================ FILE: tests/test_repo_manipulation/test_repo_manipulation.py ================================================ import pytest from tests.config import USER, USER2 from seaserv import seafile_api as api def get_repo_list_order_by(t_start, t_limit, order_by): t_repo_list = api.get_repo_list(t_start, t_limit, order_by) assert t_repo_list and len(t_repo_list) if order_by == "size": assert t_repo_list[0].size >= t_repo_list[1].size if order_by == "file_count": assert t_repo_list[0].file_count >= t_repo_list[1].file_count def test_repo_manipulation(): #test get_system_default_repo_id t_default_repo_id = api.get_system_default_repo_id() assert t_default_repo_id #test create_repo t_repo_id = api.create_repo('test_repo_manipulation', '', USER, passwd=None) assert t_repo_id #test counts_repo t_repo_count = 0 t_repo_count = api.count_repos() assert t_repo_count != 0 #test get_repo ,edit_repo t_new_name = 'n_name' t_new_desc = 'n_desc' t_repo_version = 1 t_repo = api.get_repo(t_repo_id) assert t_repo api.edit_repo(t_repo_id, t_new_name, t_new_desc, USER) t_repo = api.get_repo(t_repo_id) assert t_repo.name == t_new_name and t_repo.desc == t_new_desc #test revert_repo and get_commit t_commit_id_before_changing = t_repo.head_cmmt_id api.post_dir(t_repo_id, '/', 'dir1', USER) t_repo = api.get_repo(t_repo_id) api.revert_repo(t_repo_id, 
t_commit_id_before_changing, USER) t_repo = api.get_repo(t_repo_id) t_commit_id_after_revert = t_repo.head_cmmt_id t_commit_before_changing = api.get_commit(t_repo_id, t_repo_version, t_commit_id_before_changing) t_commit_after_revert = api.get_commit(t_repo_id, t_repo_version, t_commit_id_after_revert) assert t_commit_before_changing.root_id == t_commit_after_revert.root_id #test is_repo_owner assert api.is_repo_owner(USER, t_repo_id) assert api.is_repo_owner(USER2, t_repo_id) == 0 #test get_repo_owner owner_get = api.get_repo_owner(t_repo_id) assert owner_get == USER #test set_repo_owner api.set_repo_owner(t_repo_id, USER2) assert api.is_repo_owner(USER2, t_repo_id) #test create_enc_repo t_enc_repo_id = '826d1b7b-f110-46f2-8d5e-7b5ac3e11f4d' t_enc_version = 2 t_passwd = '123' magic_and_random_key = api.generate_magic_and_random_key (t_enc_version, t_enc_repo_id, t_passwd) t_magic = magic_and_random_key.magic t_random_key = magic_and_random_key.random_key t_enc_repo_id = api.create_enc_repo (t_enc_repo_id, 'test_encrypted_repo', '', USER, t_magic, t_random_key, None, t_enc_version) assert t_enc_repo_id == '826d1b7b-f110-46f2-8d5e-7b5ac3e11f4d' #test get_repos_by_id_prefix t_id_prefix = '826d1b7b' t_repo_list = api.get_repos_by_id_prefix(t_id_prefix, -1, -1) assert t_repo_list[0].id == '826d1b7b-f110-46f2-8d5e-7b5ac3e11f4d' #test get_repo_list #test order by None order_by = None get_repo_list_order_by(-1 ,-1, order_by) #test order by size order_by = "size" get_repo_list_order_by(-1 ,-1, order_by) #test order by file_count order_by = "file_count" get_repo_list_order_by(-1 ,-1, order_by) t_start = 1; t_limit = 1; t_repo_list = api.get_repo_list(t_start, t_limit, None) assert t_repo_list and len(t_repo_list) == 1 #test get_owned_repo_list t_repo_list = api.get_owned_repo_list(USER2) assert t_repo_list and len(t_repo_list) #test get_commit_list t_offset = 0; t_limit = 0; t_commit_list = api.get_commit_list(t_repo_id, t_offset, t_limit) assert t_commit_list and 
len(t_commit_list) == 4 t_offset = 1; t_limit = 1; t_commit_list = api.get_commit_list(t_repo_id, t_offset, t_limit) assert t_commit_list and len(t_commit_list) == 1 #test search_repos_by_name t_repo_list = api.search_repos_by_name (t_repo.name) assert len (t_repo_list) == 1 and t_repo_list[0].id == t_repo_id t_repo_list = api.search_repos_by_name (t_repo.name.upper()) assert len (t_repo_list) == 1 and t_repo_list[0].id == t_repo_id t_repo_list = api.search_repos_by_name (t_repo.name.lower()) assert len (t_repo_list) == 1 and t_repo_list[0].id == t_repo_id #test remove_repo api.remove_repo(t_repo_id) t_repo = api.get_repo(t_repo_id) assert t_repo == None ================================================ FILE: tests/test_server_config/test_server_config.py ================================================ import pytest from seaserv import seafile_api as api def test_server_config(): #test_set_server_config_int and get_server_config_int t_group = 't_group' t_key = 't_key' t_value = 1 api.set_server_config_int(t_group, t_key, t_value) t_ret = api.get_server_config_int(t_group, t_key) assert t_ret == t_value #test_set_server_config_int64 and get_server_config_int64 t_group = 't_group' t_key = 't_key' t_value = 9223372036854775807 api.set_server_config_int64(t_group, t_key, t_value) t_ret = api.get_server_config_int64(t_group, t_key) assert t_ret == t_value #test_set_server_config_string and get_server_config_string t_group = 't_group' t_key = 't_key' t_value = 't_value' api.set_server_config_string(t_group, t_key, t_value) t_ret = api.get_server_config_string(t_group, t_key) assert t_ret == t_value #test_set_server_config_boolean and get_server_config_boolean t_group = 't_group' t_key = 't_key' t_value = True api.set_server_config_boolean(t_group, t_key, t_value) t_ret = api.get_server_config_boolean(t_group, t_key) assert t_ret == t_value t_value = False api.set_server_config_boolean(t_group, t_key, t_value) t_ret = api.get_server_config_boolean(t_group, t_key) assert 
t_ret == t_value ================================================ FILE: tests/test_share_and_perm/test_shared_repo_perm.py ================================================ import pytest import time from seaserv import seafile_api as api from seaserv import ccnet_api from tests.config import ADMIN_USER, USER, USER2 from tests.utils import assert_repo_with_permission @pytest.mark.parametrize('permission', ['r', 'rw']) def test_share_repo_to_user(repo, permission): assert api.check_permission(repo.id, USER) == 'rw' assert api.check_permission(repo.id, USER2) is None assert api.repo_has_been_shared(repo.id) == False api.share_repo(repo.id, USER, USER2, permission) assert api.check_permission(repo.id, USER2) == permission assert api.repo_has_been_shared(repo.id) repos = api.get_share_in_repo_list(USER2, 0, 1) assert_repo_with_permission(repo, repos, permission) repos = api.get_share_out_repo_list(USER, 0, 1) assert_repo_with_permission(repo, repos, permission) users = api.list_repo_shared_to(USER, repo.id) assert len (users) == 1 assert users[0].repo_id == repo.id assert users[0].user == USER2 assert users[0].perm == permission api.remove_share(repo.id, USER, USER2) assert api.check_permission(repo.id, USER2) is None @pytest.mark.parametrize('permission', ['r', 'rw']) def test_share_repo_to_group(repo, group, permission): assert api.check_permission(repo.id, USER) == 'rw' assert api.check_permission(repo.id, USER2) is None repos = api.get_repos_by_group(group.id) assert len(repos) == 0 group_list = ccnet_api.get_groups(USER) assert len(group_list) == 1 group_list = ccnet_api.get_groups(USER2) assert len(group_list) == 0 api.group_share_repo(repo.id, group.id, USER, permission) repos = api.get_repos_by_group(group.id) assert_repo_with_permission(repo, repos, permission) group_ids = api.get_shared_group_ids_by_repo(repo.id) assert group_ids[0] == str(group.id) group_list = api.list_repo_shared_group_by_user(USER, repo.id) assert len(group_list) == 1 group_list = 
api.list_repo_shared_group_by_user(USER2, repo.id) assert len(group_list) == 0 repo_get = api.get_group_shared_repo_by_path (repo.id, None, group.id) assert repo_get and repo_get.repo_id == repo.id ccnet_api.group_add_member(group.id, USER, USER2) group_list = ccnet_api.get_groups(USER2) assert len(group_list) == 1 group = group_list[0] assert group.id == group.id repos2 = api.get_repos_by_group(group.id) assert_repo_with_permission(repo, repos2, permission) assert api.check_permission(repo.id, USER2) == permission repos = api.get_group_repos_by_user (USER) assert len(repos) == 1 repoids = api.get_group_repoids(group.id) assert len(repoids) == 1 repos = api.get_group_repos_by_owner(USER) assert len(repos) == 1 api.remove_group_repos_by_owner(group.id, USER) repos = api.get_group_repos_by_owner(USER) assert len(repos) == 0 api.set_group_repo(repo.id, group.id, USER, permission) repos = api.get_repos_by_group(group.id) assert len(repos) == 1 api.remove_group_repos(group.id) repos = api.get_repos_by_group(group.id) assert len(repos) == 0 api.group_unshare_repo(repo.id, group.id, USER) repos = api.get_repos_by_group(group.id) assert len(repos) == 0 assert api.check_permission(repo.id, USER2) is None @pytest.mark.parametrize('permission', ['r', 'rw']) def test_share_dir_to_user(repo, permission): v_repo_id_1 = api.share_subdir_to_user(repo.id, '/dir1', USER, USER2, permission) v_repo_id_2 = api.share_subdir_to_user(repo.id, '/dir2', USER, USER2, permission) assert api.check_permission(v_repo_id_1, USER2) == permission assert api.check_permission(v_repo_id_2, USER2) == permission vir_repo_2 = api.get_shared_repo_by_path(repo.id, '/dir2', USER2) assert vir_repo_2.permission == permission users = api.get_shared_users_for_subdir(repo.id, '/dir1', USER) assert len(users) == 1 and users[0].user == USER2 assert api.del_file(repo.id, '/', '[\"dir1\"]', USER) == 0 assert api.unshare_subdir_for_user(repo.id, '/dir2', USER, USER2) == 0 time.sleep(2.5) assert 
api.get_shared_repo_by_path(repo.id, '/dir1', USER2) is None assert api.get_shared_repo_by_path(repo.id, '/dir2', USER2) is None @pytest.mark.parametrize('permission', ['r', 'rw']) def test_share_dir_to_group(repo, group, permission): assert ccnet_api.group_add_member(group.id, USER, USER2) == 0 v_repo_id_1 = api.share_subdir_to_group(repo.id, '/dir1', USER, group.id, permission) v_repo_id_2 = api.share_subdir_to_group(repo.id, '/dir2', USER, group.id, permission) assert api.check_permission(v_repo_id_1, USER2) == permission assert api.check_permission(v_repo_id_2, USER2) == permission repo_get = api.get_group_shared_repo_by_path (repo.id, '/dir1', group.id) assert repo_get and repo_get.repo_id == v_repo_id_1 users = api.get_shared_groups_for_subdir(repo.id, '/dir1', USER) assert len(users) == 1 assert api.del_file(repo.id, '/', '[\"dir1\"]', USER) == 0 assert api.unshare_subdir_for_group(repo.id, '/dir2', USER, group.id) == 0 time.sleep(2.5) assert api.check_permission(v_repo_id_1, USER2) is None assert api.check_permission(v_repo_id_2, USER2) is None @pytest.mark.parametrize('permission_to_share, permission_to_set', [('r', 'rw'), ('rw', 'r')]) def test_set_share_permission(repo, permission_to_share, permission_to_set): assert api.check_permission(repo.id, USER2) == None api.share_repo(repo.id, USER, USER2, permission_to_share) assert api.check_permission(repo.id, USER2) == permission_to_share api.set_share_permission(repo.id, USER, USER2, permission_to_set) assert api.check_permission(repo.id, USER2) == permission_to_set api.remove_share(repo.id, USER, USER2) @pytest.mark.parametrize('permission_to_share, permission_to_set', [('r', 'rw'), ('rw', 'r')]) def set_group_repo_permission(repo, group, permission_to_share, permission_to_set): ccnet_api.group_add_member(group.id, USER, USER2) assert api.check_permission(repo.id, USER2) == None api.set_group_repo(repo.id, group.id, USER, permission_to_share) assert api.check_permission(repo.id, USER2) == 
permission_to_share api.set_group_repo_permission(group.id, repo.id, permission_to_set) assert api.check_permission(repo.id, USER2) == permission_to_set api.group_unshare_repo(repo.id, group.id, USER) @pytest.mark.parametrize('permission_to_share, permission_to_update', [('r', 'rw'), ('rw', 'r')]) def test_update_share_subdir_perm_for_user(repo, permission_to_share, permission_to_update): v_repo_id = api.share_subdir_to_user(repo.id, '/dir1', USER, USER2, permission_to_share) assert api.check_permission(v_repo_id, USER2) == permission_to_share api.update_share_subdir_perm_for_user(repo.id, '/dir1', USER, USER2, permission_to_update) assert api.check_permission(v_repo_id, USER2) == permission_to_update api.unshare_subdir_for_user(repo.id, '/dir1', USER, USER2) == 0 @pytest.mark.parametrize('permission_to_share, permission_to_update', [('r', 'rw'), ('rw', 'r')]) def test_update_share_subdir_perm_for_group(repo, group, permission_to_update, permission_to_share): ccnet_api.group_add_member(group.id, USER, USER2) v_repo_id = api.share_subdir_to_group(repo.id, '/dir1', USER, group.id, permission_to_share) assert api.check_permission(v_repo_id, USER2) == permission_to_share api.update_share_subdir_perm_for_group(repo.id, '/dir1', USER, group.id, permission_to_update) assert api.check_permission(v_repo_id, USER2) == permission_to_update api.unshare_subdir_for_group(repo.id, '/dir1', USER, group.id) @pytest.mark.parametrize('permission', ['r', 'rw']) def test_get_shared_users_by_repo(repo, group, permission): ccnet_api.group_add_member(group.id, USER, USER2) t_users = api.get_shared_users_by_repo(repo.id) assert len(t_users) == 0 api.share_repo(repo.id, USER, USER2, permission) api.set_group_repo(repo.id, group.id, ADMIN_USER, permission) t_users = api.get_shared_users_by_repo(repo.id) assert len(t_users) == 2 api.remove_share(repo.id, USER, USER2) api.group_unshare_repo(repo.id, group.id, USER) @pytest.mark.parametrize('permission', ['r', 'rw']) def 
test_subdir_permission_in_virtual_repo(repo, group, permission): api.post_dir(repo.id, '/dir1', 'subdir1', USER) api.post_dir(repo.id, '/dir2', 'subdir2', USER) v_repo_id_1 = api.share_subdir_to_user(repo.id, '/dir1', USER, USER2, permission) v_subdir_repo_id_1 = api.create_virtual_repo(v_repo_id_1, '/subdir1', 'subdir1', 'test_desc', USER, passwd='') assert api.check_permission(v_subdir_repo_id_1, USER2) == permission assert ccnet_api.group_add_member(group.id, USER, USER2) == 0 v_repo_id_2 = api.share_subdir_to_group(repo.id, '/dir2', USER, group.id, permission) v_subdir_repo_id_2 = api.create_virtual_repo(v_repo_id_2, '/subdir2', 'subdir2', 'test_desc', USER, passwd='') assert api.check_permission(v_subdir_repo_id_2, USER2) == permission assert api.unshare_subdir_for_user(repo.id, '/dir1', USER, USER2) == 0 assert api.unshare_subdir_for_group(repo.id, '/dir2', USER, group.id) == 0 ================================================ FILE: tests/test_share_and_perm/test_structure_repo_perm.py ================================================ import pytest from seaserv import seafile_api as api from seaserv import ccnet_api from tests.config import ADMIN_USER, USER, USER2 @pytest.mark.parametrize('permission', ['r', 'rw']) def test_repo_perm_in_structure (repo, permission): id1 = ccnet_api.create_group('group1', USER, parent_group_id=-1) id2 = ccnet_api.create_group('group2', USER, parent_group_id = id1) assert id1 != -1 and id2 != -1 # USER2 in child group (id2) has permission to access repo in parent group (id1) # assert ccnet_api.group_add_member(id2, USER, USER2) != -1 assert api.group_share_repo(repo.id, id1, USER, permission) != -1 assert api.check_permission(repo.id, USER2) == permission assert api.group_unshare_repo(repo.id, id1, USER) != -1 assert api.check_permission(repo.id, USER2) == None assert ccnet_api.remove_group(id2) != -1 assert ccnet_api.remove_group(id1) != -1 ================================================ FILE: 
tests/test_trashed_repos/test_trashed_repos.py ================================================ import pytest from tests.config import USER from seaserv import seafile_api as api def test_trashed_repos(repo): #test get_trash_repo_list t_start = -1 t_limit = -1 t_trash_repos_tmp = api.get_trash_repo_list(t_start, t_limit) api.remove_repo(repo.id) t_trash_repos = api.get_trash_repo_list(t_start, t_limit) assert len(t_trash_repos) == len(t_trash_repos_tmp) + 1 t_trash_repos_tmp = t_trash_repos #test get_trash_repo_owner t_owner = api.get_trash_repo_owner(repo.id) assert t_owner == USER #test restore_repo_from_trash t_repo_get = api.get_repo(repo.id) assert t_repo_get == None api.restore_repo_from_trash(repo.id) t_repo_get = api.get_repo(repo.id) assert t_repo_get and t_repo_get.repo_id == repo.id #test del_repo_from_trash api.del_repo_from_trash(repo.id) t_trash_repos = api.get_trash_repo_list(t_start, t_limit) assert len(t_trash_repos) == len(t_trash_repos_tmp) - 1 #test get_trash_repos_by_owner t_trash_repos_by_owner_tmp = api.get_trash_repos_by_owner(USER) api.remove_repo(repo.id) t_trash_repos_by_owner = api.get_trash_repos_by_owner(USER) assert len(t_trash_repos_by_owner) == len(t_trash_repos_by_owner_tmp) + 1 #test empty_repo_trash api.empty_repo_trash() t_trash_repos = api.get_trash_repo_list(t_start, t_limit) assert len(t_trash_repos) == 0 #test empty_repo_trash_by_owner t_repo_id = api.create_repo('test_trashed_repos', '', USER, passwd=None) api.remove_repo(t_repo_id) t_trash_repos_by_owner = api.get_trash_repos_by_owner(USER) assert len(t_trash_repos_by_owner) != 0 api.empty_repo_trash_by_owner(USER) t_trash_repos_by_owner = api.get_trash_repos_by_owner(USER) assert len(t_trash_repos_by_owner) == 0 ================================================ FILE: tests/test_upload/account.conf ================================================ [account] server = http://192.168.60.132 username = 123456@qq.com password = 123456 repoid = e63f9fc8-880a-427f-b2c4-42c00538cb94 
thread_num = 1000 ================================================ FILE: tests/test_upload/go.mod ================================================ module test_upload go 1.18 require ( github.com/haiwen/seafile-server/fileserver v0.0.0-20220621072834-faf434def97d // indirect gopkg.in/ini.v1 v1.66.6 // indirect ) ================================================ FILE: tests/test_upload/go.sum ================================================ github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/haiwen/seafile-server/fileserver v0.0.0-20220621072834-faf434def97d h1:7W5BeFzUFCx+xz5pINiuRJesr82pA2Gq0LZeHXBI0jE= github.com/haiwen/seafile-server/fileserver v0.0.0-20220621072834-faf434def97d/go.mod h1:3r5rRrKrYibzy1quQOR0/yvT+7L+iuAFAwTcggCp6wg= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/stretchr/testify v1.2.2/go.mod 
h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= gopkg.in/ini.v1 v1.55.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= ================================================ FILE: tests/test_upload/readme.md ================================================ go run test_upload.go -c accont.conf -p runtime ================================================ FILE: tests/test_upload/test_upload.go ================================================ package main import "fmt" import "io" import "sync" import "flag" import "log" import "encoding/json" import "bytes" import "net/http" import "mime/multipart" import "path/filepath" import "gopkg.in/ini.v1" import "github.com/haiwen/seafile-server/fileserver/searpc" type Options struct { server string username string password string repoID string 
threadNum int } var confPath string var rpcPipePath string var options Options var rpcclient *searpc.Client func init() { flag.StringVar(&confPath, "c", "", "config file path") flag.StringVar(&rpcPipePath, "p", "", "rpc pipe path") } func main() { flag.Parse() pipePath := filepath.Join(rpcPipePath, "seafile.sock") rpcclient = searpc.Init(pipePath, "seafserv-threaded-rpcserver") config, err := ini.Load(confPath) if err != nil { log.Fatalf("Failed to load config file: %v", err) } section, err := config.GetSection("account") if err != nil { log.Fatal("No account section in config file.") } key, err := section.GetKey("server") if err == nil { options.server = key.String() } key, err = section.GetKey("username") if err == nil { options.username = key.String() } key, err = section.GetKey("password") if err == nil { options.password = key.String() } key, err = section.GetKey("repoid") if err == nil { options.repoID = key.String() } key, err = section.GetKey("thread_num") if err == nil { options.threadNum, _ = key.Int() } objID := "{\"parent_dir\":\"/\"}" token, err := rpcclient.Call("seafile_web_get_access_token", options.repoID, objID, "upload", options.username, false) if err != nil { log.Fatal("Failed to get web access token\n") } accessToken, _ := token.(string) url := fmt.Sprintf("%s:8082/upload-api/%s", options.server, accessToken) content := []byte("123456") var group sync.WaitGroup for i := 0; i < options.threadNum; i++ { group.Add(1) go func(i int) { values := make(map[string]io.Reader) values["file"] = bytes.NewReader(content) values["parent_dir"] = bytes.NewBuffer([]byte("/")) // values["relative_path"] = bytes.NewBuffer([]byte(relativePath)) values["replace"] = bytes.NewBuffer([]byte("0")) form, contentType, err := createForm(values, "111.md") if err != nil { log.Fatal("Failed to create multipart form: %v", err) } headers := make(map[string][]string) headers["Content-Type"] = []string{contentType} // headers["Authorization"] = []string{"Token " + 
accessToken.(string)} status, body, err := HttpCommon("POST", url, headers, form) log.Printf("[%d]upload status: %d return body: %s error: %v\n", i, status, string(body), err) group.Done() }(i) } group.Wait() } func createForm(values map[string]io.Reader, name string) (io.Reader, string, error) { buf := new(bytes.Buffer) w := multipart.NewWriter(buf) defer w.Close() for k, v := range values { var fw io.Writer var err error if k == "file" { if fw, err = w.CreateFormFile(k, name); err != nil { return nil, "", err } } else { if fw, err = w.CreateFormField(k); err != nil { return nil, "", err } } if _, err = io.Copy(fw, v); err != nil { return nil, "", err } } return buf, w.FormDataContentType(), nil } func HttpCommon(method, url string, header map[string][]string, reader io.Reader) (int, []byte, error) { req, err := http.NewRequest(method, url, reader) if err != nil { return -1, nil, err } req.Header = header rsp, err := http.DefaultClient.Do(req) if err != nil { return -1, nil, err } defer rsp.Body.Close() if rsp.StatusCode == http.StatusNotFound { return rsp.StatusCode, nil, fmt.Errorf("url %s not found", url) } body, err := io.ReadAll(rsp.Body) if err != nil { return rsp.StatusCode, nil, err } return rsp.StatusCode, body, nil } func getToken() string { url := fmt.Sprintf("%s:8000/api2/auth-token/", options.server) header := make(map[string][]string) header["Content-Type"] = []string{"application/x-www-form-urlencoded"} data := []byte(fmt.Sprintf("username=%s&password=%s", options.username, options.password)) _, body, err := HttpCommon("POST", url, header, bytes.NewReader(data)) if err != nil { return "" } tokenMap := make(map[string]interface{}) err = json.Unmarshal(body, &tokenMap) if err != nil { return "" } token, _ := tokenMap["token"].(string) return token } ================================================ FILE: tests/test_user/test_users.py ================================================ import pytest from seaserv import seafile_api as api from seaserv 
import ccnet_api from tests.utils import randstring from tests.config import USER, USER2, ADMIN_USER def test_user_management(repo): email1 = '%s@%s.com' % (randstring(6), randstring(6)) email2 = '%s@%s.com' % (randstring(6), randstring(6)) passwd1 = 'randstring(6)' passwd2 = 'randstring(6)' ccnet_api.add_emailuser(email1, passwd1, 1, 1) ccnet_api.add_emailuser(email2, passwd2, 0, 0) ccnet_email1 = ccnet_api.get_emailuser(email1) ccnet_email2 = ccnet_api.get_emailuser(email2) assert ccnet_email1.is_active == True assert ccnet_email1.is_staff == True assert ccnet_email2.is_active == False assert ccnet_email2.is_staff == False assert ccnet_api.validate_emailuser(email1, passwd1) == 0 assert ccnet_api.validate_emailuser(email2, passwd2) == 0 users = ccnet_api.search_emailusers('DB',email1, -1, -1) assert len(users) == 1 user_ccnet = users[0] assert user_ccnet.email == email1 user_counts = ccnet_api.count_emailusers('DB') user_numbers = ccnet_api.get_emailusers('DB', -1, -1) ccnet_api.update_emailuser('DB', ccnet_email2.id, passwd2, 1, 1) email2_new = ccnet_api.get_emailuser(email2) assert email2_new.is_active == True assert email2_new.is_staff == True #test group when update user id id1 = ccnet_api.create_group('group1', email1, parent_group_id=-1) assert id1 != -1 group1 = ccnet_api.get_group(id1) assert group1.parent_group_id == -1 # test shared repo when update user id api.share_repo(repo.id, USER, email1, "rw") assert api.repo_has_been_shared(repo.id) new_email1 = '%s@%s.com' % (randstring(6), randstring(6)) assert ccnet_api.update_emailuser_id (email1, new_email1) == 0 shared_users = api.list_repo_shared_to(USER, repo.id) assert len (shared_users) == 1 assert shared_users[0].repo_id == repo.id assert shared_users[0].user == new_email1 assert shared_users[0].perm == "rw" api.remove_share(repo.id, USER, new_email1) email1_groups = ccnet_api.get_groups (new_email1) assert len (email1_groups) == 1 assert email1_groups[0].id == id1 rm1 = ccnet_api.remove_group(id1) 
assert rm1 == 0 ccnet_api.remove_emailuser('DB', new_email1) ccnet_api.remove_emailuser('DB', email2) ================================================ FILE: tests/utils.py ================================================ import os import random import string from seaserv import ccnet_api, seafile_api def create_and_get_repo(*a, **kw): repo_id = seafile_api.create_repo(*a, **kw) repo = seafile_api.get_repo(repo_id) return repo def randstring(length=12): return ''.join(random.choice(string.ascii_lowercase) for i in range(length)) def create_and_get_group(*a, **kw): group_id = ccnet_api.create_group(*a, **kw) group = ccnet_api.get_group(group_id) return group def assert_repo_with_permission(r1, r2, permission): if isinstance(r2, list): assert len(r2) == 1 r2 = r2[0] assert r2.id == r1.id assert r2.permission == permission ================================================ FILE: tools/Makefile.am ================================================ #AM_CPPFLAGS = @GLIB2_CFLAGS@ EXTRA_DIST = seafile-admin bin_SCRIPTS = seafile-admin ================================================ FILE: tools/seafile-admin ================================================ #!/usr/bin/env python # coding: UTF-8 '''This is the helper script to setup/manage your seafile server ''' import sys #################### ### Requires Python 2.6+ #################### if sys.version_info.major == 3: print 'Python 3 not supported yet. Quit now' sys.exit(1) if sys.version_info.minor < 6: print 'Python 2.6 or above is required. 
Quit now' sys.exit(1) import os import time import re import shutil import subprocess import argparse import uuid try: import readline # Avoid pylint 'unused import' warning dummy = readline except ImportError: pass #################### ### Cosntants #################### SERVER_MANUAL_HTTP = 'https://github.com/haiwen/seafile/wiki' SEAFILE_GOOGLE_GROUP = 'https://groups.google.com/forum/?fromgroups#!forum/seafile' SEAFILE_WEBSITE = 'http://www.seafile.com' SEAHUB_DOWNLOAD_URL = 'https://seafile.com.cn/downloads/seahub-latest.tar.gz' #################### ### Global variables #################### cwd = os.getcwd() SCRIPT_NAME = os.path.basename(sys.argv[0]) PYTHON = sys.executable conf = {} CONF_CCNET_DIR = 'ccnet_dir' CONF_SEAFILE_DIR = 'seafile_dir' CONF_SEAHUB_DIR = 'seafile_dir' CONF_SEAFILE_PORT = 'seafile_port' CONF_FILESERVER_PORT = 'fileserver_port' CONF_IP_OR_DOMAIN = 'ip_or_domain' CONF_SEAHUB_CONF = 'seahub_conf' CONF_SEAHUB_DIR = 'seahub_dir' CONF_SEAHUB_PORT = 'seahub_port' CONF_SEAHUB_PIDFILE = 'seahub_pidfile' CONF_SEAHUB_OUTLOG = 'seahub_outlog' CONF_SEAHUB_ERRLOG = 'seahub_errlog' CONF_CCNET_CONF_EXISTS = 'ccnet_conf_exists' CONF_SEAFILE_CONF_EXISTS = 'seafile_conf_exists' CONF_ADMIN_EMAIL = 'admin_email' CONF_ADMIN_PASSWORD = 'admin_password' CONF_SEAFILE_CENTRAL_CONF_DIR = 'central_config_dir' #################### ### Common helper functions def highlight(content): '''Add ANSI color to content to get it highlighted on terminal''' return '\x1b[33m%s\x1b[m' % content def info(msg): print msg def error(msg): print 'Error: ' + msg sys.exit(1) def ask_question(desc, key=None, note=None, default=None, validate=None, yes_or_no=False, invalidate_msg=None): '''Ask a question, return the answer. The optional validate param is a function used to validate the answer. If yes_or_no is True, then a boolean value would be returned. 
''' assert key or yes_or_no desc = highlight(desc) if note: desc += ' (%s)' % note if default: desc += '\n' + ('[default %s ]' % default) else: if yes_or_no: desc += '\n[yes or no]' else: desc += '\n' + ('[%s ]' % key) desc += ' ' while True: answer = raw_input(desc) if not answer: if default: print '' return default else: continue answer = answer.strip() if yes_or_no: if answer != 'yes' and answer != 'no': print '\nPlease answer yes or no\n' continue else: return answer == 'yes' else: if validate and not validate(answer): if invalidate_msg: print '\n%s\n' % invalidate_msg else: print '\n"%s" is not a valid %s\n' % (answer, key) continue print '' return answer def run_argv(argv, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False): '''Run a program and wait it to finish, and return its exit code. The standard output of this program is supressed. ''' with open(os.devnull, 'w') as devnull: if suppress_stdout: stdout = devnull else: stdout = sys.stdout if suppress_stderr: stderr = devnull else: stderr = sys.stderr proc = subprocess.Popen(argv, cwd=cwd, stdout=stdout, stderr=stderr, env=env) return proc.wait() def run(cmdline, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False): '''Like run_argv but specify a command line string instead of argv''' with open(os.devnull, 'w') as devnull: if suppress_stdout: stdout = devnull else: stdout = sys.stdout if suppress_stderr: stderr = devnull else: stderr = sys.stderr proc = subprocess.Popen(cmdline, cwd=cwd, stdout=stdout, stderr=stderr, env=env, shell=True) return proc.wait() def is_running(process): '''Detect if there is a process with the given name running''' argv = ['pgrep', '-f', process] return run_argv(argv, suppress_stdout=True) == 0 def pkill(process): '''Kill the program with the given name''' argv = ['pkill', '-f', process] run_argv(argv) def kill(pid): '''Kill the program with the given pid''' argv = ['kill', pid] run_argv(argv) def must_mkdir(path): '''Create a directory, exit on 
failure''' try: os.mkdir(path) except OSError, e: error('failed to create directory %s:%s' % (path, e)) ### END of Common helper functions #################### def check_seafile_install(): '''Check if seafile has been correctly built and installed in this system ''' dirs = os.environ['PATH'].split(':') def exist_in_path(prog): '''Test whether prog exists in system path''' for d in dirs: if d == '': continue path = os.path.join(d, prog) if os.path.exists(path): return True return False def check_prog(name): if not exist_in_path(name): error( '%s not found in PATH. Have you built and installed seafile server?' % name) progs = [ 'ccnet-init', 'seaf-server-init', 'seaf-server', 'ccnet-server', 'seafile-controller', ] for prog in progs: check_prog(prog) def get_seahub_env(): '''And PYTHONPATH and CCNET_CONF_DIR/SEAFILE_CONF_DIR to env, which is needed by seahub ''' seahub_dir = conf[CONF_SEAHUB_DIR] seahub_thirdpart_dir = os.path.join(seahub_dir, 'thirdpart') env = dict(os.environ) pypath = env.get('PYTHONPATH', '') pathlist = [p for p in pypath.split(':') if p != ''] pathlist.append(seahub_thirdpart_dir) newpypath = ':'.join(pathlist) env['PYTHONPATH'] = newpypath env['CCNET_CONF_DIR'] = conf[CONF_CCNET_DIR] env['SEAFILE_CONF_DIR'] = conf[CONF_SEAFILE_DIR] env['SEAFILE_CENTRAL_CONF_DIR'] = conf[CONF_SEAFILE_CENTRAL_CONF_DIR] return env #################### ### command #################### def welcome(): '''Show welcome message when running the command''' welcome_msg = '''\ ----------------------------------------------------------------- This script will guide you to config and setup your seafile server. Make sure you have read seafile server manual at %s Press [ENTER] to continue ----------------------------------------------------------------- ''' % SERVER_MANUAL_HTTP print welcome_msg raw_input() def get_server_ip_or_domain(): def validate(s): r = r'^[^.].+\..+[^.]$' return bool(re.match(r, s)) question = 'What is the ip of the server?' 
key = 'ip or domain' note = 'For example: www.mycompany.com, 192.168.1.101' conf[CONF_IP_OR_DOMAIN] = ask_question(question, key=key, note=note, validate=validate) def get_ccnet_conf_dir(): ccnet_conf_dir = os.path.join(cwd, 'ccnet') if os.path.exists(ccnet_conf_dir): question = 'It seems there already exists ccnet config files in %s, Do you want to use them?' % ccnet_conf_dir yesno = ask_question(question, yes_or_no=True) if not yesno: print highlight( '\nRemove the directory %s first, and run the script again.\n' % ccnet_conf_dir) sys.exit(1) else: conf[CONF_CCNET_CONF_EXISTS] = True else: conf[CONF_CCNET_CONF_EXISTS] = False conf[CONF_CCNET_DIR] = ccnet_conf_dir def get_seafile_port(): def validate(s): try: port = int(s) except ValueError: return False return port > 0 and port < 65536 question = 'Which port do you want to use for the seafile server?' key = 'seafile server port' default = '12001' conf[CONF_SEAFILE_PORT] = ask_question(question, key=key, default=default, validate=validate) def get_fileserver_port(): def validate(s): try: port = int(s) except ValueError: return False return port > 0 and port < 65536 question = 'Which port do you want to use for the seafile fileserver?' key = 'seafile fileserver port' default = '8082' conf[CONF_FILESERVER_PORT] = ask_question(question, key=key, default=default, validate=validate) def get_seafile_data_dir(): question = 'Where do you want to put your seafile data?' key = 'seafile-data' note = 'Please use a volume with enough free space' default = os.path.join(cwd, 'seafile-data') seafile_data_dir = ask_question(question, key=key, note=note, default=default) if os.path.exists(seafile_data_dir): question = 'It seems there already exists seafile data in %s, Do you want to use them?' 
% seafile_data_dir yesno = ask_question(question, yes_or_no=True) if not yesno: print highlight( '\nRemove the directory %s first, and run the script again.\n' % seafile_data_dir) sys.exit(1) else: conf[CONF_SEAFILE_CONF_EXISTS] = True else: conf[CONF_SEAFILE_CONF_EXISTS] = False conf[CONF_SEAFILE_DIR] = seafile_data_dir def create_gunicorn_conf(): runtime_dir = os.path.join(cwd, 'seafile-server', 'runtime') confpath = os.path.join(runtime_dir, 'seahub.conf') if os.path.exists(confpath): return if not os.path.exists(runtime_dir): must_mkdir(runtime_dir) content = '''\ import os daemon = True workers = 3 # Logging runtime_dir = os.path.dirname(__file__) pidfile = os.path.join(runtime_dir, 'seahub.pid') errorlog = os.path.join(runtime_dir, 'error.log') accesslog = os.path.join(runtime_dir, 'access.log') ''' try: with open(confpath, 'w') as fp: fp.write(content) except: error('Failed to write seahub config') def gen_seahub_secret_key(): data = str(uuid.uuid4()) + str(uuid.uuid4()) return data[:40] def create_seahub_settings_py(): seahub_settings_py = os.path.join(cwd, 'conf', 'seahub_settings.py') try: with open(seahub_settings_py, 'w') as fp: line = "SECRET_KEY = '%s'" % gen_seahub_secret_key() fp.write(line) except Exception, e: error('failed to create %s: %s' % (seahub_settings_py, e)) def move_avatar(): seahub_data_dir = os.path.join(cwd, 'seahub-data') outside_avatar_dir = os.path.join(seahub_data_dir, 'avatars') seahub_avatar_dir = os.path.join(conf[CONF_SEAHUB_DIR], 'media', 'avatars') if os.path.exists(outside_avatar_dir): return if not os.path.exists(seahub_data_dir): must_mkdir(seahub_data_dir) # move the avatars dir outside shutil.move(seahub_avatar_dir, outside_avatar_dir) # make the the original avatars dir a symlink pointing to the outside dir os.symlink(outside_avatar_dir, seahub_avatar_dir) def init_seahub(): seahub_dir = conf[CONF_SEAHUB_DIR] # create seahub_settings.py create_seahub_settings_py() argv = [PYTHON, 'manage.py', 'syncdb'] # Set proper 
PYTHONPATH before run django syncdb command env = get_seahub_env() print print info('Now initializing seahub database, please wait...') print if run_argv(argv, cwd=seahub_dir, env=env) != 0: error('Seahub syncdb failed') info('done') move_avatar() create_gunicorn_conf() def check_django_version(): '''Requires django 1.8''' import django if django.VERSION[0] != 1 or django.VERSION[1] != 8: error('Django 1.8 is required') del django def check_python_module(import_name, package_name=None, silent=False): package_name = package_name or import_name if not silent: info('checking %s' % package_name) try: __import__(import_name) except ImportError: error('Python module "%s" not found. Please install it first' % package_name) def check_python_dependencies(silent=False): '''Ensure all python libraries we need are installed''' if not silent: info('check python modules ...') check_django_version() def check(*a, **kw): kw.setdefault('silent', silent) check_python_module(*a, **kw) pkgs = [ 'sqlite3', 'chardet', 'six', 'pytz', 'rest_framework', 'compressor', 'statici18n', 'jsonfield', 'dateutil', 'constance', 'openpyxl', ] # yapf: disable for pkg in pkgs: check(pkg) check('PIL', 'python imaging library(PIL)') print def config_ccnet_seafile(): get_ccnet_conf_dir() if not conf[CONF_CCNET_CONF_EXISTS]: get_server_ip_or_domain() get_seafile_data_dir() if not conf[CONF_SEAFILE_CONF_EXISTS]: get_seafile_port() get_fileserver_port() info('This is your configuration') info('------------------------------------------') if conf[CONF_CCNET_CONF_EXISTS]: info('ccnet config: use existing config in %s' % highlight(conf[CONF_CCNET_DIR])) else: info('ccnet conf dir: %s' % highlight(conf[CONF_CCNET_DIR])) info('server host: %s' % highlight(conf[CONF_IP_OR_DOMAIN])) if conf[CONF_SEAFILE_CONF_EXISTS]: info('seafile: use existing config in %s' % highlight(conf[CONF_SEAFILE_DIR])) else: info('seafile data dir: %s' % highlight(conf[CONF_SEAFILE_DIR])) info('seafile port: %s' % 
highlight(conf[CONF_SEAFILE_PORT])) info('seafile fileserver port: %s' % highlight(conf[CONF_FILESERVER_PORT])) info('------------------------------------------') info('Press ENTER if the config is right, or anything else to re-config ') if raw_input() != '': config_ccnet_seafile() else: return def init_ccnet_seafile(): if not conf[CONF_CCNET_CONF_EXISTS]: info('Generating ccnet configuration...') argv = [ 'ccnet-init', '-F', conf[CONF_SEAFILE_CENTRAL_CONF_DIR], '-c', conf[CONF_CCNET_DIR], '--host', conf[CONF_IP_OR_DOMAIN], ] if run_argv(argv) != 0: error('failed to init ccnet configuration') info('done') if not conf[CONF_SEAFILE_CONF_EXISTS]: info('Generating seafile configuration...') argv = [ 'seaf-server-init', '-F', conf[CONF_SEAFILE_CENTRAL_CONF_DIR], '--seafile-dir', conf[CONF_SEAFILE_DIR], '--port', conf[CONF_SEAFILE_PORT], '--fileserver-port', conf[CONF_FILESERVER_PORT], ] if run_argv(argv) != 0: error('failed to init seafile configuration') info('done') seafile_ini = os.path.join(conf[CONF_CCNET_DIR], 'seafile.ini') with open(seafile_ini, 'w') as fp: fp.write(conf[CONF_SEAFILE_DIR]) #################### ### command #################### def start_controller(): argv = [ 'seafile-controller', '-c', conf[CONF_CCNET_DIR], '-d', conf[CONF_SEAFILE_DIR], '-F', conf[CONF_SEAFILE_CENTRAL_CONF_DIR], ] info('Starting seafile-server...') if run_argv(argv) != 0: error('Failed to start seafile') # check again after several seconds time.sleep(10) if not is_running('seafile-controller'): error('Failed to start seafile') def start_seahub_gunicorn(): argv = [ 'gunicorn', 'seahub.wsgi:application', '-c', conf[CONF_SEAHUB_CONF], '-b', '0.0.0.0:%s' % conf[CONF_SEAHUB_PORT], ] info('Starting seahub...') env = get_seahub_env() if run_argv(argv, cwd=conf[CONF_SEAHUB_DIR], env=env) != 0: error('Failed to start seahub') info('Seahub running on port %s' % conf[CONF_SEAHUB_PORT]) def start_seahub_fastcgi(): info('Starting seahub in fastcgi mode...') argv = [ PYTHON, 'manage.py', 
'runfcgi', 'host=%(host)s', 'port=%(port)s', 'pidfile=%(pidfile)s', 'outlog=%(outlog)s', 'errlog=%(errlog)s', ] host = os.environ.get('SEAFILE_FASTCGI_HOST', '127.0.0.1') cmdline = ' '.join(argv) % \ dict(host=host, port=conf[CONF_SEAHUB_PORT], pidfile=conf[CONF_SEAHUB_PIDFILE], outlog=conf[CONF_SEAHUB_OUTLOG], errlog=conf[CONF_SEAHUB_ERRLOG]) env = get_seahub_env() if run(cmdline, cwd=conf[CONF_SEAHUB_DIR], env=env) != 0: error('Failed to start seahub in fastcgi mode') info('Seahub running on port %s (fastcgi)' % conf[CONF_SEAHUB_PORT]) def read_seafile_data_dir(ccnet_conf_dir): '''Read the location of seafile-data from seafile.ini, also consider the upgrade from older version which do not has the seafile.ini feature ''' seafile_ini = os.path.join(ccnet_conf_dir, 'seafile.ini') if os.path.exists(seafile_ini): with open(seafile_ini, 'r') as fp: seafile_data_dir = fp.read().strip() else: # In previous seafile-admin, seafiled-data folder must be under # the top level directory, so we do not store the location of # seafile-data folder in seafile.ini seafile_data_dir = os.path.join(cwd, 'seafile-data') if os.path.exists(seafile_data_dir): with open(seafile_ini, 'w') as fp: fp.write(seafile_data_dir) return seafile_data_dir def check_layout(args): def error_not_found(path): error('%s not found' % path) ccnet_conf_dir = os.path.join(cwd, 'ccnet') if not os.path.exists(ccnet_conf_dir): error_not_found(ccnet_conf_dir) central_config_dir = os.path.join(cwd, 'conf') ccnet_conf = os.path.join(central_config_dir, 'ccnet.conf') if not os.path.exists(ccnet_conf): error_not_found(ccnet_conf) seafile_data_dir = read_seafile_data_dir(ccnet_conf_dir) if not os.path.exists(seafile_data_dir): error_not_found(seafile_data_dir) seafile_conf = os.path.join(central_config_dir, 'seafile.conf') if not os.path.exists(seafile_conf): error_not_found(seafile_conf) runtime_dir = os.path.join(cwd, 'seafile-server', 'runtime') seahub_conf = os.path.join(runtime_dir, 'seahub.conf') if not 
os.path.exists(seahub_conf): error_not_found(seahub_conf) seahub_dir = os.path.join(cwd, 'seafile-server', 'seahub') if not os.path.exists(seahub_conf): error_not_found(seahub_dir) conf[CONF_SEAFILE_CENTRAL_CONF_DIR] = central_config_dir conf[CONF_CCNET_DIR] = ccnet_conf_dir conf[CONF_SEAFILE_DIR] = seafile_data_dir conf[CONF_SEAHUB_DIR] = seahub_dir conf[CONF_SEAHUB_CONF] = seahub_conf conf[CONF_SEAHUB_PIDFILE] = os.path.join(runtime_dir, 'seahub.pid') conf[CONF_SEAHUB_OUTLOG] = os.path.join(runtime_dir, 'access.log') conf[CONF_SEAHUB_ERRLOG] = os.path.join(runtime_dir, 'error.log') def check_config(args): check_layout(args) try: port = int(args.port) except ValueError: error('invalid port: %s' % args.port) else: if port <= 0 or port > 65535: error('invalid port: %s' % args.port) conf[CONF_SEAHUB_PORT] = port def check_directory_layout(): seaf_server_dir = os.path.join(cwd, 'seafile-server') if not os.path.exists(seaf_server_dir): error( '"seafile-server/" not found in current directory. \nPlease run seafile-admin in the correct directory.') seahub_dir = os.path.join(seaf_server_dir, 'seahub') if not os.path.exists(seahub_dir): error( '"seafile-server/seahub/" not found. \nPlease download seahub first.') conf[CONF_SEAHUB_DIR] = seahub_dir def setup_seafile(args): # avoid pylint "unused variable" warning dummy = args welcome() check_python_dependencies() conf[CONF_SEAFILE_CENTRAL_CONF_DIR] = os.path.join(cwd, 'conf') config_ccnet_seafile() init_ccnet_seafile() init_seahub() print print '-----------------------------------------------------------------' print '-----------------------------------------------------------------' print 'Your seafile server configuration has been finished successfully.' 
print '-----------------------------------------------------------------' print '-----------------------------------------------------------------' print print 'To start/stop seafile server:' print print highlight(' $ cd %s' % cwd) print highlight(' $ %s { start | stop }' % SCRIPT_NAME) print print 'If you have any problem, refer to\n' print print ' Seafile server manual: %s' % SERVER_MANUAL_HTTP print print ' Seafile discussion group: %s' % SEAFILE_GOOGLE_GROUP print print ' Seafile website: %s' % SEAFILE_WEBSITE print print 'for more information.' print def check_necessary_files(): files = [ os.path.join(cwd, 'conf', 'ccnet.conf'), os.path.join(cwd, 'seafile-server', 'runtime', 'seahub.conf'), os.path.join(cwd, 'seahub.db'), os.path.join(cwd, 'conf', 'seahub_settings.py'), ] for fpath in files: if not os.path.exists(fpath): error('%s not found' % fpath) def start_seafile(args): '''start ccnet/seafile/seahub/fileserver''' if is_running('seafile-controller'): error(highlight('NOTE: Seafile is already running')) check_python_dependencies(silent=True) if args.fastcgi: check_python_module('flup', 'flup', silent=True) else: check_python_module('gunicorn', 'gunicorn', silent=True) check_necessary_files() check_config(args) start_controller() if args.port: try: port = int(args.port) except ValueError: error('invalid port: %s' % args.port) else: if port <= 0 or port > 65535: error('invalid port: %s' % args.port) if args.fastcgi: start_seahub_fastcgi() else: start_seahub_gunicorn() info('Done') def stop_seafile(dummy): info('Stopping seafile server') pkill('seafile-controller') runtime_dir = os.path.join(cwd, 'seafile-server', 'runtime') pidfile = os.path.join(runtime_dir, 'seahub.pid') try: with open(pidfile, 'r') as fp: pid = fp.read().strip('\n ') if pid: kill(pid) except: pass info('done') def reset_admin(args): '''reset seafile admin account''' check_layout(args) env = get_seahub_env() argv = [PYTHON, 'manage.py', 'createsuperuser'] env = get_seahub_env() seahub_dir = 
conf[CONF_SEAHUB_DIR] run_argv(argv, cwd=seahub_dir, env=env) def main(): check_seafile_install() check_directory_layout() parser = argparse.ArgumentParser() subparsers = parser.add_subparsers(title='subcommands', description='') parser_setup = subparsers.add_parser('setup', help='setup the seafile server') parser_setup.set_defaults(func=setup_seafile) parser_start = subparsers.add_parser('start', help='start the seafile server') parser_start.set_defaults(func=start_seafile) parser_start.add_argument('--fastcgi', help='start seahub in fastcgi mode', action='store_true') parser_start.add_argument('--port', help='start seahub in fastcgi mode', default='8000') parser_stop = subparsers.add_parser('stop', help='stop the seafile server') parser_stop.set_defaults(func=stop_seafile) parser_reset_admin = subparsers.add_parser( 'reset-admin', help='reset seafile admin account') parser_reset_admin.set_defaults(func=reset_admin) parser_create_admin = subparsers.add_parser( 'create-admin', help='create seafile admin account') parser_create_admin.set_defaults(func=reset_admin) args = parser.parse_args() args.func(args) if __name__ == '__main__': main() ================================================ FILE: updateversion.sh ================================================ #! /bin/sh if [ $# != "2" ]; then echo "$0 " exit fi old_ver=$1 new_ver=$2 if test "$(uname)" = "Darwin"; then sed -i '' -e "s|$old_ver|$new_ver|" web/setup_mac.py sed -i '' -e "s|VERSION=$old_ver|VERSION=$new_ver|" setupmac.sh sed -i '' -e "s|$old_ver|$new_ver|" gui/mac/seafile/seafile/*.plist else sed -i "s|$old_ver|$new_ver|" web/setup_mac.py sed -i "s|VERSION=$old_ver|VERSION=$new_ver|" setupmac.sh sed -i "s|$old_ver|$new_ver|" gui/mac/seafile/seafile/*.plist fi