Repository: haiwen/seafile-server
Branch: master
Commit: 377d6e5805ad
Files: 284
Total size: 2.8 MB
Directory structure:
gitextract_lx6ttehu/
├── .github/
│ └── workflows/
│ ├── ci.yml
│ └── golangci-lint.yml
├── .gitignore
├── LICENSE.txt
├── Makefile.am
├── README.markdown
├── README.testing.md
├── autogen.sh
├── ci/
│ ├── install-deps.sh
│ ├── requirements.txt
│ ├── run.py
│ ├── serverctl.py
│ └── utils.py
├── common/
│ ├── Makefile.am
│ ├── block-backend-fs.c
│ ├── block-backend.c
│ ├── block-backend.h
│ ├── block-mgr.c
│ ├── block-mgr.h
│ ├── block-tx-utils.c
│ ├── block-tx-utils.h
│ ├── block.h
│ ├── branch-mgr.c
│ ├── branch-mgr.h
│ ├── cdc/
│ │ ├── Makefile.am
│ │ ├── cdc.c
│ │ ├── cdc.h
│ │ ├── rabin-checksum.c
│ │ └── rabin-checksum.h
│ ├── commit-mgr.c
│ ├── commit-mgr.h
│ ├── common.h
│ ├── config-mgr.c
│ ├── config-mgr.h
│ ├── diff-simple.c
│ ├── diff-simple.h
│ ├── fs-mgr.c
│ ├── fs-mgr.h
│ ├── group-mgr.c
│ ├── group-mgr.h
│ ├── log.c
│ ├── log.h
│ ├── merge-new.c
│ ├── merge-new.h
│ ├── mq-mgr.c
│ ├── mq-mgr.h
│ ├── obj-backend-fs.c
│ ├── obj-backend-riak.c
│ ├── obj-backend.h
│ ├── obj-cache.c
│ ├── obj-cache.h
│ ├── obj-store.c
│ ├── obj-store.h
│ ├── object-list.c
│ ├── object-list.h
│ ├── org-mgr.c
│ ├── org-mgr.h
│ ├── password-hash.c
│ ├── password-hash.h
│ ├── processors/
│ │ └── objecttx-common.h
│ ├── redis-cache.c
│ ├── redis-cache.h
│ ├── rpc-service.c
│ ├── seaf-db.c
│ ├── seaf-db.h
│ ├── seaf-utils.c
│ ├── seaf-utils.h
│ ├── seafile-crypt.c
│ ├── seafile-crypt.h
│ ├── sync-repo-common.h
│ ├── user-mgr.c
│ ├── user-mgr.h
│ ├── vc-common.c
│ └── vc-common.h
├── configure.ac
├── controller/
│ ├── Makefile.am
│ ├── seafile-controller.c
│ └── seafile-controller.h
├── doc/
│ └── Makefile.am
├── fileserver/
│ ├── .golangci.yml
│ ├── blockmgr/
│ │ ├── blockmgr.go
│ │ └── blockmgr_test.go
│ ├── commitmgr/
│ │ ├── commitmgr.go
│ │ ├── commitmgr_test.go
│ │ └── null.go
│ ├── crypt.go
│ ├── diff/
│ │ ├── diff.go
│ │ └── diff_test.go
│ ├── fileop.go
│ ├── fileserver.go
│ ├── fsmgr/
│ │ ├── fsmgr.go
│ │ └── fsmgr_test.go
│ ├── go.mod
│ ├── go.sum
│ ├── http_code.go
│ ├── merge.go
│ ├── merge_test.go
│ ├── metrics/
│ │ └── metrics.go
│ ├── objstore/
│ │ ├── backend_fs.go
│ │ ├── objstore.go
│ │ └── objstore_test.go
│ ├── option/
│ │ └── option.go
│ ├── quota.go
│ ├── repomgr/
│ │ ├── repomgr.go
│ │ └── repomgr_test.go
│ ├── searpc/
│ │ ├── searpc.go
│ │ └── searpc_test.go
│ ├── share/
│ │ ├── group/
│ │ │ └── group.go
│ │ ├── public/
│ │ │ └── public.go
│ │ └── share.go
│ ├── size_sched.go
│ ├── sync_api.go
│ ├── utils/
│ │ ├── dup2.go
│ │ ├── dup3.go
│ │ ├── http.go
│ │ └── utils.go
│ ├── virtual_repo.go
│ └── workerpool/
│ └── workerpool.go
├── fuse/
│ ├── Makefile.am
│ ├── file.c
│ ├── getattr.c
│ ├── readdir.c
│ ├── repo-mgr.c
│ ├── repo-mgr.h
│ ├── seaf-fuse.c
│ ├── seaf-fuse.h
│ ├── seafile-session.c
│ └── seafile-session.h
├── include/
│ ├── Makefile.am
│ ├── seafile-error.h
│ └── seafile-rpc.h
├── lib/
│ ├── Makefile.am
│ ├── bloom-filter.c
│ ├── bloom-filter.h
│ ├── branch.vala
│ ├── ccnetobj.vala
│ ├── commit.vala
│ ├── copy-task.vala
│ ├── crypt.vala
│ ├── db.c
│ ├── db.h
│ ├── dir.vala
│ ├── dirent.vala
│ ├── file.vala
│ ├── include.h
│ ├── job-mgr.c
│ ├── job-mgr.h
│ ├── libseafile.pc.in
│ ├── net.c
│ ├── net.h
│ ├── repo.vala
│ ├── rpc_table.py
│ ├── seahub.vala
│ ├── search-result.vala
│ ├── task.vala
│ ├── timer.c
│ ├── timer.h
│ ├── utils.c
│ ├── utils.h
│ └── webaccess.vala
├── m4/
│ ├── ax_lib_sqlite3.m4
│ ├── glib-gettext.m4
│ └── python.m4
├── notification-server/
│ ├── .golangci.yml
│ ├── ccnet.conf
│ ├── client.go
│ ├── dup2.go
│ ├── dup3.go
│ ├── event.go
│ ├── go.mod
│ ├── go.sum
│ ├── logger.go
│ ├── server.go
│ └── subscriptions.go
├── pytest.ini
├── python/
│ ├── LICENSE.txt
│ ├── Makefile.am
│ ├── seafile/
│ │ ├── Makefile.am
│ │ ├── __init__.py
│ │ └── rpcclient.py
│ └── seaserv/
│ ├── Makefile.am
│ ├── __init__.py
│ ├── api.py
│ └── service.py
├── run_tests.sh
├── scripts/
│ ├── Makefile.am
│ ├── parse_seahub_db.py
│ └── sql/
│ ├── mysql/
│ │ ├── ccnet.sql
│ │ └── seafile.sql
│ └── sqlite/
│ ├── config.sql
│ ├── groupmgr.sql
│ ├── org.sql
│ ├── seafile.sql
│ └── user.sql
├── server/
│ ├── Makefile.am
│ ├── access-file.c
│ ├── access-file.h
│ ├── change-set.c
│ ├── change-set.h
│ ├── copy-mgr.c
│ ├── copy-mgr.h
│ ├── fileserver-config.c
│ ├── fileserver-config.h
│ ├── gc/
│ │ ├── Makefile.am
│ │ ├── fsck.c
│ │ ├── fsck.h
│ │ ├── gc-core.c
│ │ ├── gc-core.h
│ │ ├── repo-mgr.c
│ │ ├── repo-mgr.h
│ │ ├── seaf-fsck.c
│ │ ├── seafile-session.c
│ │ ├── seafile-session.h
│ │ ├── seafserv-gc.c
│ │ ├── verify.c
│ │ └── verify.h
│ ├── http-server.c
│ ├── http-server.h
│ ├── http-status-codes.h
│ ├── http-tx-mgr.c
│ ├── http-tx-mgr.h
│ ├── index-blocks-mgr.c
│ ├── index-blocks-mgr.h
│ ├── metric-mgr.c
│ ├── metric-mgr.h
│ ├── notif-mgr.c
│ ├── notif-mgr.h
│ ├── pack-dir.c
│ ├── pack-dir.h
│ ├── passwd-mgr.c
│ ├── passwd-mgr.h
│ ├── permission-mgr.c
│ ├── permission-mgr.h
│ ├── quota-mgr.c
│ ├── quota-mgr.h
│ ├── repo-mgr.c
│ ├── repo-mgr.h
│ ├── repo-op.c
│ ├── repo-perm.c
│ ├── seaf-server.c
│ ├── seafile-session.c
│ ├── seafile-session.h
│ ├── share-mgr.c
│ ├── share-mgr.h
│ ├── size-sched.c
│ ├── size-sched.h
│ ├── upload-file.c
│ ├── upload-file.h
│ ├── virtual-repo.c
│ ├── web-accesstoken-mgr.c
│ ├── web-accesstoken-mgr.h
│ ├── zip-download-mgr.c
│ └── zip-download-mgr.h
├── tests/
│ ├── __init__.py
│ ├── conf/
│ │ ├── ccnet.conf
│ │ └── mykey.peer
│ ├── config.py
│ ├── conftest.py
│ ├── test_file_operation/
│ │ ├── test_file_operation.py
│ │ ├── test_merge_virtual_repo.py
│ │ ├── test_search_files.py
│ │ ├── test_upload_and_update.py
│ │ ├── test_upload_large_files.py
│ │ └── test_zip_download.py
│ ├── test_file_property_and_dir_listing/
│ │ └── test_file_property_and_dir_listing.py
│ ├── test_gc/
│ │ └── test_gc.py
│ ├── test_get_repo_list/
│ │ └── test_get_repo_list.py
│ ├── test_group/
│ │ └── test_groups.py
│ ├── test_password/
│ │ └── test_password.py
│ ├── test_repo_manipulation/
│ │ └── test_repo_manipulation.py
│ ├── test_server_config/
│ │ └── test_server_config.py
│ ├── test_share_and_perm/
│ │ ├── test_shared_repo_perm.py
│ │ └── test_structure_repo_perm.py
│ ├── test_trashed_repos/
│ │ └── test_trashed_repos.py
│ ├── test_upload/
│ │ ├── account.conf
│ │ ├── go.mod
│ │ ├── go.sum
│ │ ├── readme.md
│ │ └── test_upload.go
│ ├── test_user/
│ │ └── test_users.py
│ └── utils.py
├── tools/
│ ├── Makefile.am
│ └── seafile-admin
└── updateversion.sh
================================================
FILE CONTENTS
================================================
================================================
FILE: .github/workflows/ci.yml
================================================
name: Seafile CI
on: [push, pull_request]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
with:
fetch-depth: 1
- uses: actions/setup-python@v3
with:
python-version: "3.12"
- name: install dependencies and test
run: |
cd $GITHUB_WORKSPACE
./ci/install-deps.sh
./ci/run.py
================================================
FILE: .github/workflows/golangci-lint.yml
================================================
name: golangci-lint
on: [push, pull_request]
permissions:
contents: read
# Optional: allow read access to pull request. Use with `only-new-issues` option.
# pull-requests: read
jobs:
golangci-fileserver:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: "1.22"
- name: golangci-lint
uses: golangci/golangci-lint-action@v6
with:
version: v1.59
working-directory: ./fileserver
args: --timeout=5m
golangci-notification-server:
name: lint-notification-server
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: "1.22"
- name: golangci-lint
uses: golangci/golangci-lint-action@v6
with:
version: v1.59
working-directory: ./notification-server
args: --timeout=5m
================================================
FILE: .gitignore
================================================
*~
*.bak
*.o
*.exe
cscope*
*#
Makefile.in
ltmain.sh
libtool
*.lo
*.la
install-sh
depcomp
config.guess
config.h
config.log
config.status
config.sub
config.cache
configure
*/.deps
autom4te*
po/POTFILES
po/Makefile*
po/stamp-it
po/*.gmo
po/*.pot
missing
mkinstalldirs
stamp-h1
*.libs/
Makefile
aclocal.m4
*core
m4/intltool.m4
m4/libtool.m4
m4/ltoptions.m4
m4/ltsugar.m4
m4/ltversion.m4
m4/lt~obsolete.m4
ccnet-*.tar.gz
config.h.in
py-compile
intltool-extract.in
intltool-merge.in
intltool-update.in
*.stamp
*.pyc
*.tmp.ui
*.defs
*.log
.deps
*.db
*.dll
*.aps
*.so
build-stamp
debian/files
debian/seafile
debian/*.substvars
lib/searpc-marshal.h
lib/searpc-signature.h
lib/*.tmp
lib/dir.c
lib/dirent.c
lib/seafile-object.h
lib/task.c
lib/webaccess.c
lib/branch.c
lib/commit.c
lib/crypt.c
lib/repo.c
lib/copy-task.c
lib/search-result.c
seaf-server
seafserv-gc
seaf-migrate
seaf-fsck
seaf-fuse
controller/seafile-controller
tools/seaf-server-init
tests/conf/misc/
tests/conf/seafile-data/
tests/conf/ccnet.db
tests/conf/ccnet.sock
tests/conf/GroupMgr
tests/conf/OrgMgr
tests/conf/PeerMgr
*.dylib
.DS_Store
*.pc
*.tar.gz
/compile
/test-driver
*.dmp
/symbols
__pycache__/
.cache/
================================================
FILE: LICENSE.txt
================================================
This program is released under Affero GPLv3, with the following additional
permission to link with OpenSSL library.
If you modify this program, or any covered work, by linking or
combining it with the OpenSSL project's OpenSSL library (or a
modified version of that library), containing parts covered by the
terms of the OpenSSL or SSLeay licenses, Seafile Ltd.
grants you additional permission to convey the resulting work.
Corresponding Source for a non-source form of such a combination
shall include the source code for the parts of OpenSSL used as well
as that of the covered work.
The source code files under the 'python' directory are released under the
Apache License v2.0. You can find the Apache License 2.0 file in that
directory.
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright © 2007 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users.
When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software.
A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public.
The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version.
An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license.
The precise terms and conditions for copying, distribution and modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based on the Program.
To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work.
A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work.
The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source.
The Corresponding Source for a work in source code form is that same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures.
When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified it, and giving a relevant date.
b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices".
c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so.
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways:
a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b.
d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d.
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product.
"Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made.
If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM).
The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or authors of the material; or
e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors.
All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11).
However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.
Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it.
A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph.
Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation.
If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program.
Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.
Copyright (C)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see .
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements.
You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see .
================================================
FILE: Makefile.am
================================================
# Top-level automake rules for seafile-server.

MAKE_CLIENT =

# The controller daemon is not built on Windows.
if WIN32
MAKE_CONTROLLER =
else
MAKE_CONTROLLER = controller
endif

# seaf-fuse is optional (see COMPILE_FUSE in configure.ac).
if COMPILE_FUSE
MAKE_FUSE = fuse
else
MAKE_FUSE =
endif

MAKE_SERVER = server tools $(MAKE_CONTROLLER) $(MAKE_FUSE)

# Directories actually built for this configuration.
SUBDIRS = include lib common python $(MAKE_SERVER) doc scripts

# All directories that may appear in a distribution tarball,
# regardless of the configure-time options above.
DIST_SUBDIRS = include lib common python server tools controller fuse doc scripts

INTLTOOL = \
	intltool-extract.in \
	intltool-merge.in \
	intltool-update.in

EXTRA_DIST = install-sh $(INTLTOOL) README.markdown scripts LICENSE.txt

ACLOCAL_AMFLAGS = -I m4

# Record the git commit a dist tarball was generated from.
dist-hook:
	git log --format='%H' -1 > $(distdir)/latest_commit
================================================
FILE: README.markdown
================================================
Seafile Server Core [](http://travis-ci.org/haiwen/seafile-server)
============
Seafile is an open source cloud storage system with features on privacy protection and teamwork. Collections of files are called libraries, and each library can be synced separately. A library can also be encrypted with a user chosen password. Seafile also allows users to create groups and easily share files within groups.
This is the core component of Seafile server. It provides RPC to the web front-end (Seahub) to access files, and provides HTTP APIs to the desktop clients for syncing files.
Build and Run
=============
See
Contributing
===========
For more information, read [Contribution](https://manual.seafile.com/contribution/).
License
=======
The Seafile server core is published under AGPLv3. Other components of Seafile have different licenses. Please refer to the corresponding projects.
Contact
=======
Twitter: @seafile
Forum:
================================================
FILE: README.testing.md
================================================
# Seafile Server Tests
## Run it locally
To run the tests, you need to install pytest first:
```sh
pip install -r ci/requirements.txt
```
Compile and install ccnet-server and seafile-server
```
cd ccnet-server
make
sudo make install
cd seafile-server
make
sudo make install
```
Then run the tests with
```sh
cd seafile-server
./run_tests.sh
```
By default the test script will try to start ccnet-server and seaf-server from `/usr/local/bin`. If you `make install` to another location, say `/opt/local`, run it like this:
```sh
SEAFILE_INSTALL_PREFIX=/opt/local ./run_tests.sh
```
================================================
FILE: autogen.sh
================================================
#!/bin/bash
# Run this to generate all the initial makefiles, etc.

: ${AUTOCONF=autoconf}
: ${AUTOHEADER=autoheader}
: ${AUTOMAKE=automake}
: ${ACLOCAL=aclocal}

# macOS installs GNU libtoolize as glibtoolize.
if test "$(uname)" != "Darwin"; then
    : ${LIBTOOLIZE=libtoolize}
else
    : ${LIBTOOLIZE=glibtoolize}
fi
: ${INTLTOOLIZE=intltoolize}
: ${LIBTOOL=libtool}

srcdir=`dirname $0`
test -z "$srcdir" && srcdir=.

ORIGDIR=`pwd`
cd $srcdir
PROJECT=ccnet
TEST_TYPE=-f
FILE=net/main.c
CONFIGURE=configure.ac

DIE=0

# Each missing-tool check prints instructions and sets DIE=1, so every
# missing prerequisite is reported in a single run before we bail out.
($AUTOCONF --version) < /dev/null > /dev/null 2>&1 || {
    echo
    echo "You must have autoconf installed to compile $PROJECT."
    echo "Download the appropriate package for your distribution,"
    echo "or get the source tarball at ftp://ftp.gnu.org/pub/gnu/"
    DIE=1
}

(grep "^AC_PROG_INTLTOOL" $srcdir/$CONFIGURE >/dev/null) && {
    ($INTLTOOLIZE --version) < /dev/null > /dev/null 2>&1 || {
        echo
        echo "You must have \`intltoolize' installed to compile $PROJECT."
        echo "Get ftp://ftp.gnome.org/pub/GNOME/stable/sources/intltool/intltool-0.22.tar.gz"
        echo "(or a newer version if it is available)"
        DIE=1
    }
}

($AUTOMAKE --version) < /dev/null > /dev/null 2>&1 || {
    echo
    echo "You must have automake installed to compile $PROJECT."
    echo "Get ftp://sourceware.cygnus.com/pub/automake/automake-1.7.tar.gz"
    echo "(or a newer version if it is available)"
    DIE=1
}

if test "$(uname)" != "Darwin"; then
    (grep "^AC_PROG_LIBTOOL" $CONFIGURE >/dev/null) && {
        ($LIBTOOL --version) < /dev/null > /dev/null 2>&1 || {
            echo
            echo "**Error**: You must have \`libtool' installed to compile $PROJECT."
            echo "Get ftp://ftp.gnu.org/pub/gnu/libtool-1.4.tar.gz"
            echo "(or a newer version if it is available)"
            DIE=1
        }
    }
fi

if grep "^AM_[A-Z0-9_]\{1,\}_GETTEXT" "$CONFIGURE" >/dev/null; then
    if grep "sed.*POTFILES" "$CONFIGURE" >/dev/null; then
        GETTEXTIZE=""
    else
        if grep "^AM_GLIB_GNU_GETTEXT" "$CONFIGURE" >/dev/null; then
            GETTEXTIZE="glib-gettextize"
            GETTEXTIZE_URL="ftp://ftp.gtk.org/pub/gtk/v2.0/glib-2.0.0.tar.gz"
        else
            GETTEXTIZE="gettextize"
            GETTEXTIZE_URL="ftp://alpha.gnu.org/gnu/gettext-0.10.35.tar.gz"
        fi

        $GETTEXTIZE --version < /dev/null > /dev/null 2>&1
        if test $? -ne 0; then
            echo
            # Fix: this message previously referenced the undefined $PKG_NAME
            # (it would expand to an empty string); use $PROJECT like the
            # other error messages in this script.
            echo "**Error**: You must have \`$GETTEXTIZE' installed to compile $PROJECT."
            echo "Get $GETTEXTIZE_URL"
            echo "(or a newer version if it is available)"
            DIE=1
        fi
    fi
fi

if test "$DIE" -eq 1; then
    exit 1
fi

dr=`dirname .`
echo processing $dr
aclocalinclude="$aclocalinclude -I m4"
if test x"$MSYSTEM" = x"MINGW32"; then
    aclocalinclude="$aclocalinclude -I /mingw32/share/aclocal"
elif test "$(uname)" = "Darwin"; then
    aclocalinclude="$aclocalinclude -I /opt/local/share/aclocal"
fi
echo "Creating $dr/aclocal.m4 ..."
test -r $dr/aclocal.m4 || touch $dr/aclocal.m4
echo "Running glib-gettextize... Ignore non-fatal messages."
echo "no" | glib-gettextize --force --copy
echo "Making $dr/aclocal.m4 writable ..."
test -r $dr/aclocal.m4 && chmod u+w $dr/aclocal.m4
echo "Running intltoolize..."
intltoolize --copy --force --automake
echo "Running $LIBTOOLIZE..."
$LIBTOOLIZE --force --copy
echo "Running $ACLOCAL $aclocalinclude ..."
$ACLOCAL $aclocalinclude
echo "Running $AUTOHEADER..."
$AUTOHEADER
echo "Running $AUTOMAKE --gnu $am_opt ..."
$AUTOMAKE --add-missing --gnu $am_opt
echo "Running $AUTOCONF ..."
$AUTOCONF
================================================
FILE: ci/install-deps.sh
================================================
#!/bin/bash
# Install build and test dependencies for CI (Ubuntu GitHub Actions runner).
set -e -x

SCRIPT=${BASH_SOURCE[0]}
TESTS_DIR=$(dirname "${SCRIPT}")/..
SETUP_DIR=${TESTS_DIR}/ci

cd $SETUP_DIR

# Make sure MySQL is up before anything tries to create the test databases.
sudo systemctl start mysql.service

sudo apt-get update --fix-missing

# Build deps for seafile-server (glib, libevent, jansson, ...) plus fuse,
# sqlite and the MySQL client headers used by the server's db layer.
sudo apt-get install -y intltool libarchive-dev libcurl4-openssl-dev libevent-dev \
libfuse-dev libglib2.0-dev libjansson-dev libmysqlclient-dev libonig-dev \
sqlite3 libsqlite3-dev libtool net-tools uuid-dev valac libargon2-dev

# NOTE(review): mysql.service is started a second time here; this looks like a
# redundant safeguard after package installation (starting an already-running
# unit is a no-op) — confirm before removing.
sudo systemctl start mysql.service

pip install -r requirements.txt
================================================
FILE: ci/requirements.txt
================================================
termcolor>=1.1.0
requests>=2.8.0
pytest>=3.3.2
backports.functools_lru_cache>=1.4
tenacity>=4.8.0
future
requests-toolbelt
================================================
FILE: ci/run.py
================================================
#!/usr/bin/env python
"""
Install dir: ~/opt/local
Data dir: /tmp/haiwen
"""
import argparse
import glob
import json
import logging
import os
import re
import sys
from os.path import abspath, basename, exists, expanduser, join
import requests
import termcolor
import site
from serverctl import ServerCtl
from utils import (
cd, chdir, debug, green, info, lru_cache, mkdirs, on_github_actions, red,
setup_logging, shell, warning
)
logger = logging.getLogger(__name__)

# The CI scripts run from inside seafile-server; sibling projects
# (libsearpc, ccnet-server, ...) are cloned next to it, so the parent of the
# current working directory is the workspace top.
TOPDIR = abspath(join(os.getcwd(), '..'))

# Install prefix for the binaries built during CI.
if on_github_actions():
    PREFIX = expanduser('~/opt/local')
else:
    PREFIX = os.environ.get('SEAFILE_INSTALL_PREFIX', '/usr/local')

# Scratch directory holding the test server's conf/data/log directories.
INSTALLDIR = '/tmp/seafile-tests'
def num_jobs():
    """Parallelism for `make -j`; override via the NUM_JOBS env var (default 2)."""
    requested = os.environ.get('NUM_JOBS', 2)
    return int(requested)
@lru_cache()
def make_build_env():
    """Return the environment used for configure/make of all CI projects.

    Cached with lru_cache so repeated callers share the same dict. Adds the
    local install PREFIX to the compiler/linker/pkg-config search paths and
    wires up PYTHONPATH so edited python sources are used without reinstall.
    """
    env = dict(os.environ)
    libsearpc_dir = abspath(join(TOPDIR, 'libsearpc'))
    ccnet_dir = abspath(join(TOPDIR, 'ccnet-server'))

    def _env_add(*a, **kw):
        # Convenience wrapper: always prepend into this env dict.
        kw['env'] = env
        return prepend_env_value(*a, **kw)

    _env_add('CPPFLAGS', '-I%s' % join(PREFIX, 'include'), seperator=' ')
    _env_add('LDFLAGS', '-L%s' % join(PREFIX, 'lib'), seperator=' ')
    _env_add('LDFLAGS', '-L%s' % join(PREFIX, 'lib64'), seperator=' ')
    _env_add('PATH', join(PREFIX, 'bin'))

    py_version = '.'.join(map(str, sys.version_info[:3]))
    if on_github_actions():
        # BUG FIX: this was a plain string literal, so '{py_version}' was never
        # interpolated (py_version was computed but unused); use an f-string
        # as clearly intended.
        _env_add('PYTHONPATH',
                 join(os.environ.get('RUNNER_TOOL_CACHE'),
                      f'Python/{py_version}/x64/lib/python3.12/site-packages'))
    _env_add('PYTHONPATH', join(PREFIX, 'lib/python3.12/site-packages'))

    _env_add('PKG_CONFIG_PATH', join(PREFIX, 'lib', 'pkgconfig'))
    _env_add('PKG_CONFIG_PATH', join(PREFIX, 'lib64', 'pkgconfig'))
    _env_add('PKG_CONFIG_PATH', libsearpc_dir)
    _env_add('PKG_CONFIG_PATH', ccnet_dir)
    _env_add('LD_LIBRARY_PATH', join(PREFIX, 'lib'))
    _env_add('JWT_PRIVATE_KEY', '@%ukmcl$k=9u-grs4azdljk(sn0kd!=mzc17xd7x8#!u$1x@kl')
    _env_add('SEAFILE_MYSQL_DB_CCNET_DB_NAME', 'ccnet')

    # Prepend the seafile-server/python to PYTHONPATH so we don't need to "make
    # install" each time after editing python files.
    _env_add('PYTHONPATH', join(SeafileServer().projectdir, 'python'))

    for key in ('PATH', 'PKG_CONFIG_PATH', 'CPPFLAGS', 'LDFLAGS', 'PYTHONPATH'):
        info('%s: %s', key, env.get(key, ''))
    return env
def prepend_env_value(name, value, seperator=':', env=None):
'''append a new value to a list'''
env = env or os.environ
current_value = env.get(name, '')
new_value = value
if current_value:
new_value += seperator + current_value
env[name] = new_value
return env
@lru_cache()
def get_branch_json_file():
    # Fetch (once per run, via lru_cache) the mapping of seafile-server branch
    # -> branch to use for each sibling project, maintained in the
    # seafile-test-deploy repository.
    url = 'https://raw.githubusercontent.com/haiwen/seafile-test-deploy/master/branches.json'
    return requests.get(url).json()
def get_project_branch(project, default_branch='master'):
    """Pick the branch to build for `project`, keyed on the branch under test."""
    branch_under_test = os.environ.get('TRAVIS_BRANCH', 'master')
    # seafile-server itself always uses the branch being tested.
    if project.name == 'seafile-server':
        return branch_under_test
    mapping = get_branch_json_file()
    return mapping.get(branch_under_test, {}).get(project.name, default_branch)
class Project(object):
    """A sibling git project that CI can clone, build and install."""

    def __init__(self, name):
        self.name = name
        self.version = ''

    @property
    def url(self):
        # Clone over https from the haiwen organization by default;
        # subclasses override for projects hosted elsewhere.
        return 'https://www.github.com/haiwen/{}.git'.format(self.name)

    @property
    def projectdir(self):
        return join(TOPDIR, self.name)

    def branch(self):
        return get_project_branch(self)

    def clone(self):
        if not exists(self.name):
            shell(
                'git clone --depth=1 --branch {} {}'.
                format(self.branch(), self.url)
            )
        else:
            # Already cloned: just refresh tags.
            with cd(self.name):
                shell('git fetch origin --tags')

    @chdir
    def compile_and_install(self):
        # Standard autotools build, run inside the project directory.
        steps = (
            './autogen.sh',
            './configure --prefix={}'.format(PREFIX),
            'make -j{} V=0'.format(num_jobs()),
            'make install',
        )
        for step in steps:
            shell(step)

    @chdir
    def use_branch(self, branch):
        shell('git checkout {}'.format(branch))
class Libsearpc(Project):
    """libsearpc RPC framework; always built from master."""

    def __init__(self):
        super().__init__('libsearpc')

    def branch(self):
        return 'master'
class CcnetServer(Project):
    """ccnet-server sources; pinned to the 7.1 branch."""

    def __init__(self):
        super().__init__('ccnet-server')

    def branch(self):
        return '7.1'
class SeafileServer(Project):
    """The project under test; inherits the default branch logic."""

    def __init__(self):
        super().__init__('seafile-server')
class Libevhtp(Project):
    """libevhtp HTTP library; built with cmake instead of autotools."""

    def __init__(self):
        super().__init__('libevhtp')

    def branch(self):
        return 'master'

    @chdir
    def compile_and_install(self):
        build_steps = (
            'cmake -DEVHTP_DISABLE_SSL=ON -DEVHTP_BUILD_SHARED=OFF -DCMAKE_POLICY_VERSION_MINIMUM=3.5 .',
            'make',
            'sudo make install',
            'sudo ldconfig',
        )
        for step in build_steps:
            shell(step)
class Libjwt(Project):
    """libjwt (JWT tokens); hosted outside the haiwen org, pinned to v1.13.1."""

    def __init__(self):
        super().__init__('libjwt')

    def branch(self):
        return 'v1.13.1'

    @property
    def url(self):
        return 'https://www.github.com/benmcollins/libjwt.git'

    @chdir
    def compile_and_install(self):
        for step in (
            'autoreconf -i',
            './configure',
            'sudo make all',
            'sudo make install',
        ):
            shell(step)
class Libhiredis(Project):
    """hiredis Redis client library; pinned to v1.1.0."""

    def __init__(self):
        super().__init__('hiredis')

    def branch(self):
        return 'v1.1.0'

    @property
    def url(self):
        return 'https://github.com/redis/hiredis.git'

    @chdir
    def compile_and_install(self):
        for step in ('sudo make', 'sudo make install'):
            shell(step)
def fetch_and_build():
    """Clone all sibling dependencies, then build them and seafile-server."""
    libsearpc = Libsearpc()
    libjwt = Libjwt()
    libhiredis = Libhiredis()
    libevhtp = Libevhtp()
    ccnet = CcnetServer()
    seafile = SeafileServer()

    # Fetch sources first (seafile-server itself is already checked out by CI).
    for project in (libsearpc, libjwt, libhiredis, libevhtp, ccnet):
        project.clone()

    # Build in dependency order; ccnet-server is cloned for its sources only
    # and is not compiled here.
    for project in (libsearpc, libjwt, libhiredis, libevhtp, seafile):
        project.compile_and_install()
def parse_args():
    """Parse the CI runner's command-line flags."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', action='store_true')
    parser.add_argument('-t', '--test-only', action='store_true')
    return parser.parse_args()
def main():
    """CI entry point: build everything (unless --test-only), then test."""
    mkdirs(INSTALLDIR)
    os.environ.update(make_build_env())
    options = parse_args()
    # Only build dependencies on CI; local runs use preinstalled binaries.
    if on_github_actions() and not options.test_only:
        fetch_and_build()
    for db in ('mysql',):
        start_and_test_with_db(db)
def start_and_test_with_db(db):
    """Set up a fresh server backed by `db` and run the pytest suite against it."""
    # The go fileserver needs mysql, so sqlite only exercises the C fileserver.
    if db == 'sqlite3':
        fileservers = ('c_fileserver',)
    else:
        fileservers = ('go_fileserver', 'c_fileserver')

    for fileserver in fileservers:
        # Wipe the previous run's data so each combination starts clean.
        shell('rm -rf {}/*'.format(INSTALLDIR))
        info('Setting up seafile server with %s database, use %s', db, fileserver)
        server = ServerCtl(
            TOPDIR,
            SeafileServer().projectdir,
            INSTALLDIR,
            fileserver,
            db=db,
            # Use the newly built seaf-server (to avoid "make install" each time when developping locally)
            seaf_server_bin=join(SeafileServer().projectdir, 'server/seaf-server')
        )
        server.setup()
        with server.run():
            info('Testing with %s database', db)
            with cd(SeafileServer().projectdir):
                shell('py.test', env=server.get_seaserv_envs())
if __name__ == '__main__':
    # Run from the workspace top so the relative project paths resolve.
    os.chdir(TOPDIR)
    setup_logging()
    main()
================================================
FILE: ci/serverctl.py
================================================
#!/usr/bin/env python
#coding: UTF-8
import argparse
import glob
import logging
import os
import re
import sys
from collections import namedtuple
from contextlib import contextmanager
from os.path import abspath, basename, dirname, exists, join
import requests
from tenacity import TryAgain, retry, stop_after_attempt, wait_fixed
from utils import (
cd, chdir, debug, green, info, mkdirs, red, setup_logging, shell, warning
)
logger = logging.getLogger(__name__)
class ServerCtl(object):
    """Configure, start and stop a throwaway seafile server for the test suite.

    Owns the conf/data/log directories under `datadir`, the database setup
    (mysql or sqlite), and the seaf-server / go fileserver subprocesses.
    """

    def __init__(self, topdir, projectdir, datadir, fileserver, db='sqlite3',
                 seaf_server_bin='seaf-server', ccnet_server_bin='ccnet-server'):
        # db: 'mysql' or 'sqlite3'; fileserver: 'go_fileserver' or 'c_fileserver'.
        self.db = db
        self.topdir = topdir
        self.datadir = datadir
        self.central_conf_dir = join(datadir, 'conf')
        self.seafile_conf_dir = join(datadir, 'seafile-data')
        self.ccnet_conf_dir = join(datadir, 'ccnet')

        self.log_dir = join(datadir, 'logs')
        mkdirs(self.log_dir)
        self.ccnet_log = join(self.log_dir, 'ccnet.log')
        self.seafile_log = join(self.log_dir, 'seafile.log')
        self.fileserver_log = join(self.log_dir, 'fileserver.log')

        self.ccnet_server_bin = ccnet_server_bin
        self.seaf_server_bin = seaf_server_bin

        # SQL schema files shipped with seafile-server.
        self.sql_dir = join(topdir, 'seafile-server', 'scripts', 'sql')

        self.ccnet_proc = None
        self.seafile_proc = None
        self.fileserver_proc = None
        self.projectdir = projectdir
        self.fileserver = fileserver

    def setup(self):
        """Create databases (mysql only) and config directories, then write configs."""
        if self.db == 'mysql':
            create_mysql_dbs()

        os.mkdir(self.central_conf_dir, 0o755)
        os.mkdir(self.seafile_conf_dir, 0o755)
        os.mkdir(self.ccnet_conf_dir, 0o755)

        self.init_seafile()

    def init_seafile(self):
        """Append the [fileserver] section and database section to seafile.conf."""
        seafile_conf = join(self.central_conf_dir, 'seafile.conf')

        if self.fileserver == 'go_fileserver':
            seafile_fileserver_conf = '''\
[fileserver]
use_go_fileserver = true
port=8082
'''
        else:
            seafile_fileserver_conf = '''\
[fileserver]
port=8082
'''
        with open(seafile_conf, 'a+') as fp:
            fp.write('\n')
            fp.write(seafile_fileserver_conf)

        if self.db == 'mysql':
            self.add_seafile_db_conf()
        else:
            self.add_seafile_sqlite_db_conf()

    def add_seafile_sqlite_db_conf(self):
        # sqlite needs no connection settings; an empty [database] section
        # makes the server fall back to the sqlite default.
        seafile_conf = join(self.central_conf_dir, 'seafile.conf')
        seafile_db_conf = '''\
[database]
'''
        with open(seafile_conf, 'a+') as fp:
            fp.write('\n')
            fp.write(seafile_db_conf)

    def add_seafile_db_conf(self):
        # Credentials match those created by create_mysql_dbs() below.
        seafile_conf = join(self.central_conf_dir, 'seafile.conf')
        seafile_db_conf = '''\
[database]
type = mysql
host = 127.0.0.1
port = 3306
user = seafile
password = seafile
db_name = seafile
connection_charset = utf8
'''
        with open(seafile_conf, 'a+') as fp:
            fp.write('\n')
            fp.write(seafile_db_conf)

    @contextmanager
    def run(self):
        """Start the servers, yield self, always stop; dump logs on any failure."""
        try:
            self.start()
            yield self
        except:
            self.print_logs()
            raise
        finally:
            self.stop()

    def print_logs(self):
        # cat the daemon logs so failures are visible in CI output.
        for logfile in self.ccnet_log, self.seafile_log:
            if exists(logfile):
                shell(f'cat {logfile}')

    @retry(wait=wait_fixed(1), stop=stop_after_attempt(10))
    def wait_ccnet_ready(self):
        # Poll (1s apart, up to 10 attempts) until ccnet's RPC socket exists.
        if not exists(join(self.ccnet_conf_dir, 'ccnet-rpc.sock')):
            raise TryAgain

    def start(self):
        logger.info('Starting to create ccnet and seafile db tables')
        self.create_database_tables()

        logger.info('Starting seafile server')
        self.start_seafile()
        self.start_fileserver()

    def create_database_tables(self):
        """Load the schema into mysql, or create the individual sqlite db files."""
        if self.db == 'mysql':
            ccnet_sql_path = join(self.sql_dir, 'mysql', 'ccnet.sql')
            seafile_sql_path = join(self.sql_dir, 'mysql', 'seafile.sql')
            sql = f'USE ccnet; source {ccnet_sql_path}; USE seafile; source {seafile_sql_path};'.encode()
            shell('sudo mysql -u root -proot', inputdata=sql, wait=False)
        else:
            # sqlite keeps each manager's tables in its own .db file, laid out
            # the way ccnet/seafile expect them under the conf dirs.
            config_sql_path = join(self.sql_dir, 'sqlite', 'config.sql')
            groupmgr_sql_path = join(self.sql_dir, 'sqlite', 'groupmgr.sql')
            org_sql_path = join(self.sql_dir, 'sqlite', 'org.sql')
            user_sql_path = join(self.sql_dir, 'sqlite', 'user.sql')
            seafile_sql_path = join(self.sql_dir, 'sqlite', 'seafile.sql')

            misc_dir = join(self.ccnet_conf_dir, 'misc')
            os.mkdir(misc_dir, 0o755)
            groupmgr_dir = join(self.ccnet_conf_dir, 'GroupMgr')
            os.mkdir(groupmgr_dir, 0o755)
            orgmgr_dir = join(self.ccnet_conf_dir, 'OrgMgr')
            os.mkdir(orgmgr_dir, 0o755)
            usermgr_dir = join(self.ccnet_conf_dir, 'PeerMgr')
            os.mkdir(usermgr_dir, 0o755)

            config_db_path = join(misc_dir, 'config.db')
            groupmgr_db_path = join(groupmgr_dir, 'groupmgr.db')
            orgmgr_db_path = join(orgmgr_dir, 'orgmgr.db')
            usermgr_db_path = join(usermgr_dir, 'usermgr.db')
            seafile_db_path = join(self.seafile_conf_dir, 'seafile.db')

            sql = f'.read {config_sql_path}'.encode()
            shell('sqlite3 ' + config_db_path, inputdata=sql, wait=False)
            sql = f'.read {groupmgr_sql_path}'.encode()
            shell('sqlite3 ' + groupmgr_db_path, inputdata=sql, wait=False)
            sql = f'.read {org_sql_path}'.encode()
            shell('sqlite3 ' + orgmgr_db_path, inputdata=sql, wait=False)
            sql = f'.read {user_sql_path}'.encode()
            shell('sqlite3 ' + usermgr_db_path, inputdata=sql, wait=False)
            sql = f'.read {seafile_sql_path}'.encode()
            shell('sqlite3 ' + seafile_db_path, inputdata=sql, wait=False)

    def start_ccnet(self):
        # NOTE(review): not invoked by start(); presumably kept for older
        # setups that still ran a separate ccnet-server — confirm before removal.
        cmd = [
            self.ccnet_server_bin,
            "-F",
            self.central_conf_dir,
            "-c",
            self.ccnet_conf_dir,
            "-f",
            self.ccnet_log,
        ]
        self.ccnet_proc = shell(cmd, wait=False)

    def start_seafile(self):
        # -f keeps seaf-server in the foreground so we hold the Popen handle.
        cmd = [
            self.seaf_server_bin,
            "-F",
            self.central_conf_dir,
            "-c",
            self.ccnet_conf_dir,
            "-d",
            self.seafile_conf_dir,
            "-l",
            self.seafile_log,
            "-f",
        ]
        self.seafile_proc = shell(cmd, wait=False)

    def start_fileserver(self):
        cmd = [
            "./fileserver",
            "-F",
            self.central_conf_dir,
            "-d",
            self.seafile_conf_dir,
            "-l",
            self.fileserver_log,
        ]
        fileserver_path = join(self.projectdir, 'fileserver')
        # Build and launch from the fileserver source dir ("./fileserver" is
        # the freshly built binary in that directory).
        with cd(fileserver_path):
            shell("go build")
            self.fileserver_proc = shell(cmd, wait=False)

    def stop(self):
        """Kill any started subprocesses and (for mysql) drop the test databases."""
        if self.ccnet_proc:
            logger.info('Stopping ccnet server')
            self.ccnet_proc.kill()

        if self.seafile_proc:
            logger.info('Stopping seafile server')
            self.seafile_proc.kill()

        if self.fileserver_proc:
            logger.info('Stopping go fileserver')
            self.fileserver_proc.kill()

        if self.db == 'mysql':
            del_mysql_dbs()

    def get_seaserv_envs(self):
        """Environment for running the pytest suite against this server."""
        envs = dict(os.environ)
        envs.update({
            'SEAFILE_CENTRAL_CONF_DIR': self.central_conf_dir,
            'CCNET_CONF_DIR': self.ccnet_conf_dir,
            'SEAFILE_CONF_DIR': self.seafile_conf_dir,
            'SEAFILE_MYSQL_DB_CCNET_DB_NAME': 'ccnet',
        })
        return envs
def create_mysql_dbs():
    """Create the ccnet/seafile test databases and the 'seafile' MySQL user."""
    setup_sql = b'''\
create database `ccnet` character set = 'utf8';
create database `seafile` character set = 'utf8';
create user 'seafile'@'localhost' identified by 'seafile';
GRANT ALL PRIVILEGES ON `ccnet`.* to `seafile`@localhost;
GRANT ALL PRIVILEGES ON `seafile`.* to `seafile`@localhost;
'''
    shell('sudo mysql -u root -proot', inputdata=setup_sql)
def del_mysql_dbs():
    """Drop the test databases and user created by create_mysql_dbs()."""
    teardown_sql = b'''\
drop database `ccnet`;
drop database `seafile`;
drop user 'seafile'@'localhost';
'''
    shell('sudo mysql -u root -proot', inputdata=teardown_sql)
================================================
FILE: ci/utils.py
================================================
#coding: UTF-8
import logging
import os
import re
import sys
from contextlib import contextmanager
from os.path import abspath, basename, exists, expanduser, join
from subprocess import PIPE, CalledProcessError, Popen
import requests
import termcolor
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
logger = logging.getLogger(__name__)
def _color(s, color):
    """Colorize *s* for terminal output; return it untouched when stdout
    is not a tty (e.g. when output is piped into a log file)."""
    if not os.isatty(sys.stdout.fileno()):
        return s
    return termcolor.colored(str(s), color)
def green(s):
    # Green text for success/informational messages (tty only).
    return _color(s, 'green')
def red(s):
    # Red text for warnings/errors (tty only).
    return _color(s, 'red')
def debug(fmt, *a):
    # DEBUG-level log with the format string colored green.
    logger.debug(green(fmt), *a)
def info(fmt, *a):
    # INFO-level log with the format string colored green.
    logger.info(green(fmt), *a)
def warning(fmt, *a):
    # WARNING-level log with the format string colored red.
    # Fix: Logger.warn() is a deprecated alias and was removed in
    # Python 3.13; use Logger.warning().
    logger.warning(red(fmt), *a)
def shell(cmd, inputdata=None, wait=True, **kw):
    """Run *cmd* via subprocess.

    A list is executed directly; a string goes through the shell.  When
    *inputdata* is given it is written to the child's stdin.  With
    wait=False the Popen object is returned immediately; otherwise the
    call blocks and raises CalledProcessError on a non-zero exit.
    """
    info('calling "%s" in %s', cmd, kw.get('cwd', os.getcwd()))
    kw['shell'] = not isinstance(cmd, list)
    kw['stdin'] = PIPE if inputdata else None
    proc = Popen(cmd, **kw)
    if inputdata:
        proc.communicate(inputdata)
    if not wait:
        return proc
    proc.wait()
    if proc.returncode:
        raise CalledProcessError(proc.returncode, cmd)
@contextmanager
def cd(path):
    """Context manager: chdir into *path* for the duration of the block,
    then restore the previous working directory (even on exception)."""
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)
def chdir(func):
    # Method decorator: run the wrapped method with the process cwd set
    # to self.projectdir (restored afterwards via the cd() context manager).
    def wrapped(self, *w, **kw):
        with cd(self.projectdir):
            return func(self, *w, **kw)
    return wrapped
def setup_logging():
    """Configure root logging: DEBUG-level output to stdout, and silence
    the chatty urllib3 connection-pool logger down to WARNING."""
    logging.basicConfig(
        format='[%(asctime)s][%(module)s]: %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.DEBUG,
        stream=sys.stdout,
    )
    pool_logger = logging.getLogger('requests.packages.urllib3.connectionpool')
    pool_logger.setLevel(logging.WARNING)
def mkdirs(*paths):
    """Create each directory in *paths* if it does not already exist.

    Fix: the original exists()-then-mkdir pair raced with concurrent
    creators and failed when a parent directory was missing;
    os.makedirs(..., exist_ok=True) handles both atomically.
    """
    for path in paths:
        os.makedirs(path, exist_ok=True)
def on_github_actions():
    """True when running inside GitHub Actions CI (env var set by the runner)."""
    return os.environ.get('GITHUB_ACTIONS') is not None
@contextmanager
def cd(path):
    # NOTE(review): this redefines the cd() declared earlier in this module
    # and silently shadows it; only this definition is effective at import
    # time.  The sole difference is the expanduser() call here — consider
    # deleting the first copy.
    path = expanduser(path)
    olddir = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(olddir)
================================================
FILE: common/Makefile.am
================================================
# Recurse into the content-defined-chunking sub-library.
SUBDIRS = cdc

# Headers kept under processors/.
proc_headers = \
	$(addprefix processors/, \
				objecttx-common.h)

# Internal headers of the common library: shipped by "make dist" but
# never installed (noinst).
noinst_HEADERS = \
	diff-simple.h \
	seafile-crypt.h \
	password-hash.h \
	common.h \
	branch-mgr.h \
	fs-mgr.h \
	block-mgr.h \
	commit-mgr.h \
	log.h \
	object-list.h \
	vc-common.h \
	seaf-utils.h \
	obj-store.h \
	obj-backend.h \
	block-backend.h \
	block.h \
	mq-mgr.h \
	seaf-db.h \
	config-mgr.h \
	merge-new.h \
	block-tx-utils.h \
	sync-repo-common.h \
	$(proc_headers)
================================================
FILE: common/block-backend-fs.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x500
#endif
#include "common.h"
#include "utils.h"
#include "log.h"
#include
#include
#include
#include "block-backend.h"
#include "obj-store.h"
/* Per-open-block state returned by block_backend_fs_open_block(). */
struct _BHandle {
    char *store_id;     /* owning store (repo) id; NULL if none given */
    int version;        /* repo data format version */
    char block_id[41];  /* hex SHA-1 of the block, NUL-terminated */
    int fd;             /* open file descriptor */
    int rw_type;        /* BLOCK_READ or BLOCK_WRITE */
    char *tmp_file;     /* write path: tmp file renamed into place on commit */
};

/* Private state of the filesystem block backend. */
typedef struct {
    char *block_dir;    /* root directory of committed blocks */
    int block_dir_len;
    char *tmp_dir;      /* directory for in-progress write tmp files */
    int tmp_dir_len;
} FsPriv;

static char *
get_block_path (BlockBackend *bend,
                const char *block_sha1,
                char path[],
                const char *store_id,
                int version);

static int
open_tmp_file (BlockBackend *bend,
               const char *basename,
               char **path);
/* Open a block for reading or writing.
 *
 * Reads open the committed block file directly; writes go to a unique
 * tmp file that commit_block() later renames into its final place.
 * Returns a newly allocated handle, or NULL on error.
 */
static BHandle *
block_backend_fs_open_block (BlockBackend *bend,
                             const char *store_id,
                             int version,
                             const char *block_id,
                             int rw_type)
{
    BHandle *handle;
    int fd = -1;
    char *tmp_file;

    g_return_val_if_fail (block_id != NULL, NULL);
    g_return_val_if_fail (strlen(block_id) == 40, NULL);
    g_return_val_if_fail (rw_type == BLOCK_READ || rw_type == BLOCK_WRITE, NULL);

    if (rw_type == BLOCK_READ) {
        char path[SEAF_PATH_MAX];
        get_block_path (bend, block_id, path, store_id, version);
        fd = g_open (path, O_RDONLY | O_BINARY, 0);
        if (fd < 0) {
            ccnet_warning ("[block bend] failed to open block %s for read: %s\n",
                           block_id, strerror(errno));
            return NULL;
        }
    } else {
        /* Unique tmp file per writer, so concurrent writers of the same
         * block cannot clobber each other. */
        fd = open_tmp_file (bend, block_id, &tmp_file);
        if (fd < 0) {
            ccnet_warning ("[block bend] failed to open block %s for write: %s\n",
                           block_id, strerror(errno));
            return NULL;
        }
    }

    handle = g_new0(BHandle, 1);
    handle->fd = fd;
    /* 41 bytes: the 40 hex chars plus the NUL (length checked above). */
    memcpy (handle->block_id, block_id, 41);
    handle->rw_type = rw_type;
    if (rw_type == BLOCK_WRITE)
        handle->tmp_file = tmp_file;
    if (store_id)
        handle->store_id = g_strdup(store_id);
    handle->version = version;

    return handle;
}
/* Read up to len bytes from the open block into buf.
 * readn semantics: returns the number of bytes read, negative on error. */
static int
block_backend_fs_read_block (BlockBackend *bend,
                             BHandle *handle,
                             void *buf, int len)
{
    int n_read = readn (handle->fd, buf, len);
    if (n_read < 0)
        seaf_warning ("Failed to read block %s:%s: %s.\n",
                      handle->store_id, handle->block_id, strerror (errno));
    return n_read;
}
/* Write len bytes from buf to the open block's tmp file.
 * writen semantics: returns the number of bytes written, negative on error. */
static int
block_backend_fs_write_block (BlockBackend *bend,
                              BHandle *handle,
                              const void *buf, int len)
{
    int n_written = writen (handle->fd, buf, len);
    if (n_written < 0)
        seaf_warning ("Failed to write block %s:%s: %s.\n",
                      handle->store_id, handle->block_id, strerror (errno));
    return n_written;
}
/* Close the block's file descriptor; returns close()'s result. */
static int
block_backend_fs_close_block (BlockBackend *bend,
                              BHandle *handle)
{
    return close (handle->fd);
}
/* Release a block handle.  For write handles this also unlinks the tmp
 * file, so an uncommitted (or failed) write leaves nothing behind. */
static void
block_backend_fs_block_handle_free (BlockBackend *bend,
                                    BHandle *handle)
{
    if (handle->rw_type == BLOCK_WRITE) {
        /* make sure the tmp file is removed even on failure. */
        g_unlink (handle->tmp_file);
        g_free (handle->tmp_file);
    }
    g_free (handle->store_id);
    g_free (handle);
}
/* Ensure the parent directory of `path` exists, creating intermediate
 * directories as needed.  Returns 0 on success, -1 on error. */
static int
create_parent_path (const char *path)
{
    int ret = 0;
    char *parent = g_path_get_dirname (path);

    if (!parent)
        return -1;

    if (!g_file_test (parent, G_FILE_TEST_EXISTS) &&
        g_mkdir_with_parents (parent, 0777) < 0) {
        seaf_warning ("Failed to create object parent path: %s.\n", parent);
        ret = -1;
    }

    g_free (parent);
    return ret;
}
/* Publish a block opened with BLOCK_WRITE: compute its final path, make
 * sure the parent directories exist, and rename the tmp file into place.
 * The rename makes the block visible to readers all at once.
 * Returns 0 on success, -1 on error. */
static int
block_backend_fs_commit_block (BlockBackend *bend,
                               BHandle *handle)
{
    char path[SEAF_PATH_MAX];

    g_return_val_if_fail (handle->rw_type == BLOCK_WRITE, -1);

    get_block_path (bend, handle->block_id, path, handle->store_id, handle->version);

    if (create_parent_path (path) < 0) {
        seaf_warning ("Failed to create path for block %s:%s.\n",
                      handle->store_id, handle->block_id);
        return -1;
    }

    if (g_rename (handle->tmp_file, path) < 0) {
        seaf_warning ("[block bend] failed to commit block %s:%s: %s\n",
                      handle->store_id, handle->block_id, strerror(errno));
        return -1;
    }

    return 0;
}
/* TRUE when the committed block file exists on disk. */
static gboolean
block_backend_fs_block_exists (BlockBackend *bend,
                               const char *store_id,
                               int version,
                               const char *block_sha1)
{
    char block_path[SEAF_PATH_MAX];

    get_block_path (bend, block_sha1, block_path, store_id, version);
    return g_access (block_path, F_OK) == 0;
}
/* Delete the committed block file; returns g_unlink()'s result
 * (0 on success, -1 on error). */
static int
block_backend_fs_remove_block (BlockBackend *bend,
                               const char *store_id,
                               int version,
                               const char *block_id)
{
    char path[SEAF_PATH_MAX];

    get_block_path (bend, block_id, path, store_id, version);

    return g_unlink (path);
}
/* Stat a committed block on disk.  Returns a newly allocated BMetadata
 * (caller frees) with the block id and file size, or NULL on error.
 * Note: id[] is 41 bytes and g_new0 zeroes it, so copying 40 chars
 * leaves it NUL-terminated. */
static BMetadata *
block_backend_fs_stat_block (BlockBackend *bend,
                             const char *store_id,
                             int version,
                             const char *block_id)
{
    char path[SEAF_PATH_MAX];
    SeafStat st;
    BMetadata *block_md;

    get_block_path (bend, block_id, path, store_id, version);

    if (seaf_stat (path, &st) < 0) {
        seaf_warning ("[block bend] Failed to stat block %s:%s at %s: %s.\n",
                      store_id, block_id, path, strerror(errno));
        return NULL;
    }

    block_md = g_new0(BMetadata, 1);
    memcpy (block_md->id, block_id, 40);
    block_md->size = (uint32_t) st.st_size;

    return block_md;
}
/* Like stat_block(), but uses the already-open fd of the handle (works
 * for uncommitted write handles too).  Returns NULL on error. */
static BMetadata *
block_backend_fs_stat_block_by_handle (BlockBackend *bend,
                                       BHandle *handle)
{
    SeafStat st;
    BMetadata *block_md;

    if (seaf_fstat (handle->fd, &st) < 0) {
        seaf_warning ("[block bend] Failed to stat block %s:%s.\n",
                      handle->store_id, handle->block_id);
        return NULL;
    }

    block_md = g_new0(BMetadata, 1);
    memcpy (block_md->id, handle->block_id, 40);
    block_md->size = (uint32_t) st.st_size;

    return block_md;
}
/* Invoke process() for every block in the store.  Blocks are laid out
 * two levels deep: <block_dir>/<store_id>/<2-char prefix>/<38-char rest>.
 * Iteration stops early (still returning 0) when process() returns FALSE.
 */
static int
block_backend_fs_foreach_block (BlockBackend *bend,
                                const char *store_id,
                                int version,
                                SeafBlockFunc process,
                                void *user_data)
{
    FsPriv *priv = bend->be_priv;
    char *block_dir = NULL;
    int dir_len;
    GDir *dir1 = NULL, *dir2;
    const char *dname1, *dname2;
    char block_id[128];
    char path[SEAF_PATH_MAX], *pos;
    int ret = 0;

#if defined MIGRATION
    if (version > 0)
        block_dir = g_build_filename (priv->block_dir, store_id, NULL);
#else
    block_dir = g_build_filename (priv->block_dir, store_id, NULL);
#endif

    /* Fix: under MIGRATION a version-0 store left block_dir NULL, and the
     * original code then called strlen(NULL) — undefined behavior. */
    if (!block_dir)
        return 0;

    dir_len = strlen (block_dir);

    dir1 = g_dir_open (block_dir, 0, NULL);
    if (!dir1) {
        goto out;
    }

    memcpy (path, block_dir, dir_len);
    pos = path + dir_len;

    while ((dname1 = g_dir_read_name(dir1)) != NULL) {
        snprintf (pos, sizeof(path) - dir_len, "/%s", dname1);

        dir2 = g_dir_open (path, 0, NULL);
        if (!dir2) {
            seaf_warning ("Failed to open block dir %s.\n", path);
            continue;
        }

        while ((dname2 = g_dir_read_name(dir2)) != NULL) {
            /* Block id = 2-char directory name + 38-char file name. */
            snprintf (block_id, sizeof(block_id), "%s%s", dname1, dname2);
            if (!process (store_id, version, block_id, user_data)) {
                g_dir_close (dir2);
                goto out;
            }
        }
        g_dir_close (dir2);
    }

out:
    if (dir1)
        g_dir_close (dir1);
    g_free (block_dir);
    return ret;
}
/* Copy a block between stores by hard-linking the committed file, so no
 * data is duplicated.  Succeeds trivially when the destination already
 * exists (including a concurrent link that raced us: EEXIST is ignored
 * on the POSIX path).  Returns 0 on success, -1 on error. */
static int
block_backend_fs_copy (BlockBackend *bend,
                       const char *src_store_id,
                       int src_version,
                       const char *dst_store_id,
                       int dst_version,
                       const char *block_id)
{
    char src_path[SEAF_PATH_MAX];
    char dst_path[SEAF_PATH_MAX];

    get_block_path (bend, block_id, src_path, src_store_id, src_version);
    get_block_path (bend, block_id, dst_path, dst_store_id, dst_version);

    if (g_file_test (dst_path, G_FILE_TEST_EXISTS))
        return 0;

    if (create_parent_path (dst_path) < 0) {
        seaf_warning ("Failed to create dst path %s for block %s.\n",
                      dst_path, block_id);
        return -1;
    }

#ifdef WIN32
    if (!CreateHardLink (dst_path, src_path, NULL)) {
        seaf_warning ("Failed to link %s to %s: %lu.\n",
                      src_path, dst_path, GetLastError());
        return -1;
    }
    return 0;
#else
    int ret = link (src_path, dst_path);
    if (ret < 0 && errno != EEXIST) {
        seaf_warning ("Failed to link %s to %s: %s.\n",
                      src_path, dst_path, strerror(errno));
        return -1;
    }
    return ret;
#endif
}
/* Delete every block under <block_dir>/<store_id> (walking the two-level
 * fan-out), then remove the directories themselves.  A missing store
 * directory counts as success.  Returns 0 on success, -1 on error. */
static int
block_backend_fs_remove_store (BlockBackend *bend, const char *store_id)
{
    FsPriv *priv = bend->be_priv;
    char *block_dir = NULL;
    GDir *dir1, *dir2;
    const char *dname1, *dname2;
    char *path1, *path2;

    block_dir = g_build_filename (priv->block_dir, store_id, NULL);

    dir1 = g_dir_open (block_dir, 0, NULL);
    if (!dir1) {
        g_free (block_dir);
        return 0;
    }

    while ((dname1 = g_dir_read_name(dir1)) != NULL) {
        path1 = g_build_filename (block_dir, dname1, NULL);

        dir2 = g_dir_open (path1, 0, NULL);
        if (!dir2) {
            seaf_warning ("Failed to open block dir %s.\n", path1);
            g_dir_close (dir1);
            g_free (path1);
            g_free (block_dir);
            return -1;
        }

        while ((dname2 = g_dir_read_name(dir2)) != NULL) {
            path2 = g_build_filename (path1, dname2, NULL);
            g_unlink (path2);
            g_free (path2);
        }
        g_dir_close (dir2);

        g_rmdir (path1);
        g_free (path1);
    }

    g_dir_close (dir1);
    g_rmdir (block_dir);
    g_free (block_dir);

    return 0;
}
/* Build the on-disk path of a block into `path` (>= SEAF_PATH_MAX bytes):
 *   <block_dir>/<store_id>/<first 2 hex chars>/<remaining 38 chars>
 * NOTE(review): under MIGRATION with version == 0 no directory prefix is
 * written and the result is a bare relative "<2>/<38>" path — presumably
 * the legacy layout is handled elsewhere; confirm before relying on it. */
static char *
get_block_path (BlockBackend *bend,
                const char *block_sha1,
                char path[],
                const char *store_id,
                int version)
{
    FsPriv *priv = bend->be_priv;
    char *pos = path;
    int n;

#if defined MIGRATION
    if (version > 0) {
        n = snprintf (path, SEAF_PATH_MAX, "%s/%s/", priv->block_dir, store_id);
        pos += n;
    } else
#else
    n = snprintf (path, SEAF_PATH_MAX, "%s/%s/", priv->block_dir, store_id);
    pos += n;
#endif

    /* "<2 chars>/" fan-out directory, then the remaining 38 chars + NUL. */
    memcpy (pos, block_sha1, 2);
    pos[2] = '/';
    pos += 3;

    memcpy (pos, block_sha1 + 2, 41 - 2);

    return path;
}
/* Create and open a unique tmp file "<tmp_dir>/<basename>.XXXXXX".
 * On success returns the open fd and stores the newly allocated path in
 * *path (caller frees).  On failure returns a negative fd and sets
 * *path to NULL. */
static int
open_tmp_file (BlockBackend *bend,
               const char *basename,
               char **path)
{
    FsPriv *priv = bend->be_priv;
    int fd;

    *path = g_strdup_printf ("%s/%s.XXXXXX", priv->tmp_dir, basename);
    fd = g_mkstemp (*path);
    if (fd < 0) {
        /* Fix: don't leave *path dangling after the free — the original
         * freed the string but kept the stale pointer in *path. */
        g_free (*path);
        *path = NULL;
    }

    return fd;
}
/* Construct the filesystem block backend.
 *
 * Blocks are stored under <seaf_dir>/storage/blocks; in-progress writes
 * go to tmp_dir.  Both directories are created if missing.  Returns the
 * populated vtable, or NULL when a directory cannot be created. */
BlockBackend *
block_backend_fs_new (const char *seaf_dir, const char *tmp_dir)
{
    BlockBackend *bend;
    FsPriv *priv;

    bend = g_new0(BlockBackend, 1);
    priv = g_new0(FsPriv, 1);
    bend->be_priv = priv;

    priv->block_dir = g_build_filename (seaf_dir, "storage", "blocks", NULL);
    priv->block_dir_len = strlen (priv->block_dir);

    priv->tmp_dir = g_strdup (tmp_dir);
    priv->tmp_dir_len = strlen (tmp_dir);

    if (g_mkdir_with_parents (priv->block_dir, 0777) < 0) {
        seaf_warning ("Block dir %s does not exist and"
                      " is unable to create\n", priv->block_dir);
        goto onerror;
    }

    if (g_mkdir_with_parents (tmp_dir, 0777) < 0) {
        seaf_warning ("Blocks tmp dir %s does not exist and"
                      " is unable to create\n", tmp_dir);
        goto onerror;
    }

    /* Wire up the backend vtable. */
    bend->open_block = block_backend_fs_open_block;
    bend->read_block = block_backend_fs_read_block;
    bend->write_block = block_backend_fs_write_block;
    bend->commit_block = block_backend_fs_commit_block;
    bend->close_block = block_backend_fs_close_block;
    bend->exists = block_backend_fs_block_exists;
    bend->remove_block = block_backend_fs_remove_block;
    bend->stat_block = block_backend_fs_stat_block;
    bend->stat_block_by_handle = block_backend_fs_stat_block_by_handle;
    bend->block_handle_free = block_backend_fs_block_handle_free;
    bend->foreach_block = block_backend_fs_foreach_block;
    bend->remove_store = block_backend_fs_remove_store;
    bend->copy = block_backend_fs_copy;

    return bend;

onerror:
    g_free (bend->be_priv);
    g_free (bend);
    return NULL;
}
================================================
FILE: common/block-backend.c
================================================
#include "common.h"
#include "log.h"
#include "block-backend.h"
extern BlockBackend *
block_backend_fs_new (const char *block_dir, const char *tmp_dir);
/* Build the filesystem block backend from the [block_backend] config
 * section; requires the "block_dir" and "tmp_dir" keys.  Returns NULL
 * when a key is missing or the backend fails to initialize. */
BlockBackend*
load_filesystem_block_backend(GKeyFile *config)
{
    BlockBackend *bend;
    char *tmp_dir;
    char *block_dir;

    block_dir = g_key_file_get_string (config, "block_backend", "block_dir", NULL);
    if (!block_dir) {
        seaf_warning ("Block dir not set in config.\n");
        return NULL;
    }

    tmp_dir = g_key_file_get_string (config, "block_backend", "tmp_dir", NULL);
    if (!tmp_dir) {
        seaf_warning ("Block tmp dir not set in config.\n");
        /* Fix: the original leaked block_dir on this error path. */
        g_free (block_dir);
        return NULL;
    }

    bend = block_backend_fs_new (block_dir, tmp_dir);

    g_free (block_dir);
    g_free (tmp_dir);
    return bend;
}
/* Dispatch on the configured backend name; only "filesystem" is
 * supported.  Returns NULL when the name is missing or unknown. */
BlockBackend*
load_block_backend (GKeyFile *config)
{
    char *backend;
    BlockBackend *bend;

    backend = g_key_file_get_string (config, "block_backend", "name", NULL);
    if (!backend) {
        return NULL;
    }

    if (strcmp(backend, "filesystem") == 0) {
        bend = load_filesystem_block_backend(config);
        g_free (backend);
        return bend;
    }

    seaf_warning ("Unknown backend\n");
    /* Fix: the original leaked the backend string on this path. */
    g_free (backend);
    return NULL;
}
================================================
FILE: common/block-backend.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef BLOCK_BACKEND_H
#define BLOCK_BACKEND_H
#include "block.h"
typedef struct BlockBackend BlockBackend;

/* Vtable of block-storage operations.  Currently the only implementation
 * is the filesystem backend (block-backend-fs.c). */
struct BlockBackend {
    /* Open a block for BLOCK_READ or BLOCK_WRITE; NULL on error. */
    BHandle* (*open_block) (BlockBackend *bend,
                            const char *store_id, int version,
                            const char *block_id, int rw_type);
    /* readn/writen semantics: bytes moved on success, negative on error. */
    int (*read_block) (BlockBackend *bend, BHandle *handle, void *buf, int len);
    int (*write_block) (BlockBackend *bend, BHandle *handle, const void *buf, int len);
    /* Publish a block opened for write (rename tmp file into place). */
    int (*commit_block) (BlockBackend *bend, BHandle *handle);
    int (*close_block) (BlockBackend *bend, BHandle *handle);
    int (*exists) (BlockBackend *bend,
                   const char *store_id, int version,
                   const char *block_id);
    int (*remove_block) (BlockBackend *bend,
                         const char *store_id, int version,
                         const char *block_id);
    /* Both stat variants return newly allocated metadata; caller frees. */
    BMetadata* (*stat_block) (BlockBackend *bend,
                              const char *store_id, int version,
                              const char *block_id);
    BMetadata* (*stat_block_by_handle) (BlockBackend *bend, BHandle *handle);
    void (*block_handle_free) (BlockBackend *bend, BHandle *handle);
    /* Iterate all blocks in a store; stops early when process() returns FALSE. */
    int (*foreach_block) (BlockBackend *bend,
                          const char *store_id,
                          int version,
                          SeafBlockFunc process,
                          void *user_data);
    int (*copy) (BlockBackend *bend,
                 const char *src_store_id,
                 int src_version,
                 const char *dst_store_id,
                 int dst_version,
                 const char *block_id);

    /* Only valid for version 1 repo. Remove all blocks for the repo. */
    int (*remove_store) (BlockBackend *bend,
                         const char *store_id);

    void* be_priv;           /* backend private field */
};
BlockBackend* load_block_backend (GKeyFile *config);
#endif
================================================
FILE: common/block-mgr.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#include "common.h"
#include "seafile-session.h"
#include "utils.h"
#include "seaf-utils.h"
#include "block-mgr.h"
#include "log.h"
#include
#include
#include
#include
#include
#include
#include
#include
#include "block-backend.h"
#define SEAF_BLOCK_DIR "blocks"
extern BlockBackend *
block_backend_fs_new (const char *block_dir, const char *tmp_dir);
/* Create a block manager backed by the filesystem backend rooted at
 * seaf_dir; tmp files go to seaf->tmp_file_dir.  Returns NULL on
 * backend-initialization failure. */
SeafBlockManager *
seaf_block_manager_new (struct _SeafileSession *seaf,
                        const char *seaf_dir)
{
    SeafBlockManager *mgr;

    mgr = g_new0 (SeafBlockManager, 1);
    mgr->seaf = seaf;

    mgr->backend = block_backend_fs_new (seaf_dir, seaf->tmp_file_dir);
    if (!mgr->backend) {
        seaf_warning ("[Block mgr] Failed to load backend.\n");
        goto onerror;
    }

    return mgr;

onerror:
    g_free (mgr);
    return NULL;
}
/* No-op: the filesystem backend needs no separate initialization step. */
int
seaf_block_manager_init (SeafBlockManager *mgr)
{
    return 0;
}
/* Validate ids, then delegate to the backend.  Returns NULL when the
 * store id is not a valid UUID or the block id is not a valid object id. */
BlockHandle *
seaf_block_manager_open_block (SeafBlockManager *mgr,
                               const char *store_id,
                               int version,
                               const char *block_id,
                               int rw_type)
{
    if (!store_id || !is_uuid_valid(store_id) ||
        !block_id || !is_object_id_valid(block_id))
        return NULL;

    return mgr->backend->open_block (mgr->backend,
                                     store_id, version,
                                     block_id, rw_type);
}
/* Read up to len bytes from an open block (readn semantics). */
int
seaf_block_manager_read_block (SeafBlockManager *mgr,
                               BlockHandle *handle,
                               void *buf, int len)
{
    return mgr->backend->read_block (mgr->backend, handle, buf, len);
}
/* Write len bytes to an open block (writen semantics). */
int
seaf_block_manager_write_block (SeafBlockManager *mgr,
                                BlockHandle *handle,
                                const void *buf, int len)
{
    return mgr->backend->write_block (mgr->backend, handle, buf, len);
}
/* Close an open block; does not free the handle (see block_handle_free). */
int
seaf_block_manager_close_block (SeafBlockManager *mgr,
                                BlockHandle *handle)
{
    return mgr->backend->close_block (mgr->backend, handle);
}
/* Free a block handle (and, for write handles, its tmp file).
 * Fix: the original wrote `return <void expression>;`, which is a
 * constraint violation in C (C11 6.8.6.4p1) — call and return separately. */
void
seaf_block_manager_block_handle_free (SeafBlockManager *mgr,
                                      BlockHandle *handle)
{
    mgr->backend->block_handle_free (mgr->backend, handle);
}
/* Publish a block opened for write; 0 on success, -1 on error. */
int
seaf_block_manager_commit_block (SeafBlockManager *mgr,
                                 BlockHandle *handle)
{
    return mgr->backend->commit_block (mgr->backend, handle);
}
/* TRUE when the block exists in the store; FALSE for invalid ids too. */
gboolean seaf_block_manager_block_exists (SeafBlockManager *mgr,
                                          const char *store_id,
                                          int version,
                                          const char *block_id)
{
    if (!store_id || !is_uuid_valid(store_id) ||
        !block_id || !is_object_id_valid(block_id))
        return FALSE;

    return mgr->backend->exists (mgr->backend, store_id, version, block_id);
}
/* Delete a block; -1 for invalid ids or backend failure, 0 on success. */
int
seaf_block_manager_remove_block (SeafBlockManager *mgr,
                                 const char *store_id,
                                 int version,
                                 const char *block_id)
{
    if (!store_id || !is_uuid_valid(store_id) ||
        !block_id || !is_object_id_valid(block_id))
        return -1;

    return mgr->backend->remove_block (mgr->backend, store_id, version, block_id);
}
/* Stat a block by id; returns newly allocated metadata (caller frees),
 * or NULL for invalid ids or backend failure. */
BlockMetadata *
seaf_block_manager_stat_block (SeafBlockManager *mgr,
                               const char *store_id,
                               int version,
                               const char *block_id)
{
    if (!store_id || !is_uuid_valid(store_id) ||
        !block_id || !is_object_id_valid(block_id))
        return NULL;

    return mgr->backend->stat_block (mgr->backend, store_id, version, block_id);
}
/* Stat an already-open block via its handle; caller frees the result. */
BlockMetadata *
seaf_block_manager_stat_block_by_handle (SeafBlockManager *mgr,
                                         BlockHandle *handle)
{
    return mgr->backend->stat_block_by_handle (mgr->backend, handle);
}
/* Invoke process() for every block in the store; iteration stops early
 * when process() returns FALSE. */
int
seaf_block_manager_foreach_block (SeafBlockManager *mgr,
                                  const char *store_id,
                                  int version,
                                  SeafBlockFunc process,
                                  void *user_data)
{
    return mgr->backend->foreach_block (mgr->backend,
                                        store_id, version,
                                        process, user_data);
}
/* Copy a block between stores.  The empty block (EMPTY_SHA1) and blocks
 * already present in the destination are skipped as no-ops. */
int
seaf_block_manager_copy_block (SeafBlockManager *mgr,
                               const char *src_store_id,
                               int src_version,
                               const char *dst_store_id,
                               int dst_version,
                               const char *block_id)
{
    if (strcmp (block_id, EMPTY_SHA1) == 0)
        return 0;

    if (seaf_block_manager_block_exists (mgr, dst_store_id, dst_version, block_id)) {
        return 0;
    }

    return mgr->backend->copy (mgr->backend,
                               src_store_id,
                               src_version,
                               dst_store_id,
                               dst_version,
                               block_id);
}
/* foreach_block callback: increments the guint64 counter passed in data.
 * Always returns TRUE so iteration covers every block. */
static gboolean
get_block_number (const char *store_id,
                  int version,
                  const char *block_id,
                  void *data)
{
    guint64 *count = data;
    *count += 1;
    return TRUE;
}
/* Count the blocks in a store by walking all of them. */
guint64
seaf_block_manager_get_block_number (SeafBlockManager *mgr,
                                     const char *store_id,
                                     int version)
{
    guint64 n_blocks = 0;

    seaf_block_manager_foreach_block (mgr, store_id, version,
                                      get_block_number, &n_blocks);

    return n_blocks;
}
/* Verify a block's integrity by re-hashing its content and comparing the
 * SHA-1 against the block id.  *io_error is set TRUE on open/read
 * failures (as opposed to a clean hash mismatch).
 *
 * Fix: the original returned from the read-error path without closing
 * the block or freeing the handle, leaking the fd and the handle. */
gboolean
seaf_block_manager_verify_block (SeafBlockManager *mgr,
                                 const char *store_id,
                                 int version,
                                 const char *block_id,
                                 gboolean *io_error)
{
    BlockHandle *h;
    char buf[10240];
    int n;
    SHA_CTX ctx;
    guint8 sha1[20];
    char check_id[41];

    h = seaf_block_manager_open_block (mgr,
                                       store_id, version,
                                       block_id, BLOCK_READ);
    if (!h) {
        seaf_warning ("Failed to open block %s:%.8s.\n", store_id, block_id);
        *io_error = TRUE;
        return FALSE;
    }

    SHA1_Init (&ctx);
    while (1) {
        n = seaf_block_manager_read_block (mgr, h, buf, sizeof(buf));
        if (n < 0) {
            seaf_warning ("Failed to read block %s:%.8s.\n", store_id, block_id);
            *io_error = TRUE;
            /* Release the handle on the error path too. */
            seaf_block_manager_close_block (mgr, h);
            seaf_block_manager_block_handle_free (mgr, h);
            return FALSE;
        }
        if (n == 0)
            break;
        SHA1_Update (&ctx, buf, n);
    }

    seaf_block_manager_close_block (mgr, h);
    seaf_block_manager_block_handle_free (mgr, h);

    SHA1_Final (sha1, &ctx);
    rawdata_to_hex (sha1, check_id, 20);

    if (strcmp (check_id, block_id) == 0)
        return TRUE;
    else
        return FALSE;
}
/* Remove every block of a repo.  Only valid for version 1 repos. */
int
seaf_block_manager_remove_store (SeafBlockManager *mgr,
                                 const char *store_id)
{
    return mgr->backend->remove_store (mgr->backend, store_id);
}
================================================
FILE: common/block-mgr.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef SEAF_BLOCK_MGR_H
#define SEAF_BLOCK_MGR_H
#include
#include
#include
#include "block.h"
struct _SeafileSession;
typedef struct _SeafBlockManager SeafBlockManager;
struct _SeafBlockManager {
struct _SeafileSession *seaf;
struct BlockBackend *backend;
};
SeafBlockManager *
seaf_block_manager_new (struct _SeafileSession *seaf,
const char *seaf_dir);
/*
* Open a block for read or write.
*
* @store_id: id for the block store
* @version: data format version for the repo
* @block_id: ID of block.
* @rw_type: BLOCK_READ or BLOCK_WRITE.
* Returns: A handle for the block.
*/
BlockHandle *
seaf_block_manager_open_block (SeafBlockManager *mgr,
const char *store_id,
int version,
const char *block_id,
int rw_type);
/*
* Read data from a block.
* The semantics is similar to readn.
*
 * @handle: Handle returned by seaf_block_manager_open_block().
 * @buf: Data would be copied into this buf.
* @len: At most @len bytes would be read.
*
* Returns: the bytes read.
*/
int
seaf_block_manager_read_block (SeafBlockManager *mgr,
BlockHandle *handle,
void *buf, int len);
/*
* Write data to a block.
* The semantics is similar to writen.
*
 * @handle: Handle returned by seaf_block_manager_open_block().
* @buf: Data to be written to the block.
* @len: At most @len bytes would be written.
*
* Returns: the bytes written.
*/
int
seaf_block_manager_write_block (SeafBlockManager *mgr,
BlockHandle *handle,
const void *buf, int len);
/*
* Commit a block to storage.
* The block must be opened for write.
*
 * @handle: Handle returned by seaf_block_manager_open_block().
*
* Returns: 0 on success, -1 on error.
*/
int
seaf_block_manager_commit_block (SeafBlockManager *mgr,
BlockHandle *handle);
/*
* Close an open block.
*
 * @handle: Handle returned by seaf_block_manager_open_block().
*
* Returns: 0 on success, -1 on error.
*/
int
seaf_block_manager_close_block (SeafBlockManager *mgr,
BlockHandle *handle);
void
seaf_block_manager_block_handle_free (SeafBlockManager *mgr,
BlockHandle *handle);
gboolean
seaf_block_manager_block_exists (SeafBlockManager *mgr,
const char *store_id,
int version,
const char *block_id);
int
seaf_block_manager_remove_block (SeafBlockManager *mgr,
const char *store_id,
int version,
const char *block_id);
BlockMetadata *
seaf_block_manager_stat_block (SeafBlockManager *mgr,
const char *store_id,
int version,
const char *block_id);
BlockMetadata *
seaf_block_manager_stat_block_by_handle (SeafBlockManager *mgr,
BlockHandle *handle);
int
seaf_block_manager_foreach_block (SeafBlockManager *mgr,
const char *store_id,
int version,
SeafBlockFunc process,
void *user_data);
int
seaf_block_manager_copy_block (SeafBlockManager *mgr,
const char *src_store_id,
int src_version,
const char *dst_store_id,
int dst_version,
const char *block_id);
/* Remove all blocks for a repo. Only valid for version 1 repo. */
int
seaf_block_manager_remove_store (SeafBlockManager *mgr,
const char *store_id);
guint64
seaf_block_manager_get_block_number (SeafBlockManager *mgr,
const char *store_id,
int version);
gboolean
seaf_block_manager_verify_block (SeafBlockManager *mgr,
const char *store_id,
int version,
const char *block_id,
gboolean *io_error);
#endif
================================================
FILE: common/block-tx-utils.c
================================================
#include "common.h"
#define DEBUG_FLAG SEAFILE_DEBUG_TRANSFER
#include "log.h"
#include "utils.h"
#include "block-tx-utils.h"
/* Utility functions for block transfer protocol. */
/* Encryption related functions. */
/* Derive the AES-256-CBC key and IV for the block-transfer channel from
 * the negotiated session key, using OpenSSL's EVP_BytesToKey with SHA-1
 * and 3 iterations (both peers must use identical parameters). */
void
blocktx_generate_encrypt_key (unsigned char *session_key, int sk_len,
                              unsigned char *key, unsigned char *iv)
{
    EVP_BytesToKey (EVP_aes_256_cbc(), /* cipher mode */
                    EVP_sha1(),        /* message digest */
                    NULL,              /* salt */
                    session_key,
                    sk_len,
                    3,   /* iteration times */
                    key, /* the derived key */
                    iv); /* IV, initial vector */
}
/* Allocate and initialize an AES-256-CBC encryption context.
 * Returns 0 on success, -1 on failure (with *ctx set to NULL).
 *
 * Fix: the original leaked the freshly allocated context when
 * EVP_EncryptInit_ex failed, and never checked EVP_CIPHER_CTX_new. */
int
blocktx_encrypt_init (EVP_CIPHER_CTX **ctx,
                      const unsigned char *key,
                      const unsigned char *iv)
{
    int ret;

    /* Prepare CTX for encryption. */
    *ctx = EVP_CIPHER_CTX_new ();
    if (*ctx == NULL)
        return -1;

    ret = EVP_EncryptInit_ex (*ctx,
                              EVP_aes_256_cbc(), /* cipher mode */
                              NULL, /* engine, NULL for default */
                              key,  /* derived key */
                              iv);  /* initial vector */
    if (ret == 0) {
        EVP_CIPHER_CTX_free (*ctx);
        *ctx = NULL;
        return -1;
    }

    return 0;
}
/* Allocate and initialize an AES-256-CBC decryption context.
 * Returns 0 on success, -1 on failure (with *ctx set to NULL).
 *
 * Fix: the original leaked the freshly allocated context when
 * EVP_DecryptInit_ex failed, and never checked EVP_CIPHER_CTX_new. */
int
blocktx_decrypt_init (EVP_CIPHER_CTX **ctx,
                      const unsigned char *key,
                      const unsigned char *iv)
{
    int ret;

    /* Prepare CTX for decryption. */
    *ctx = EVP_CIPHER_CTX_new();
    if (*ctx == NULL)
        return -1;

    ret = EVP_DecryptInit_ex (*ctx,
                              EVP_aes_256_cbc(), /* cipher mode */
                              NULL, /* engine, NULL for default */
                              key,  /* derived key */
                              iv);  /* initial vector */
    if (ret == 0) {
        EVP_CIPHER_CTX_free (*ctx);
        *ctx = NULL;
        return -1;
    }

    return 0;
}
/* Sending frame */
int
send_encrypted_data_frame_begin (evutil_socket_t data_fd,
int frame_len)
{
/* Compute data size after encryption.
* Block size is 16 bytes and AES always add one padding block.
*/
int enc_frame_len;
enc_frame_len = ((frame_len >> 4) + 1) << 4;
enc_frame_len = htonl (enc_frame_len);
if (sendn (data_fd, &enc_frame_len, sizeof(int)) < 0) {
seaf_warning ("Failed to send frame length: %s.\n",
evutil_socket_error_to_string(evutil_socket_geterror(data_fd)));
return -1;
}
return 0;
}
/* Encrypt len bytes from buf with ctx and send the ciphertext.
 * NOTE(review): out_buf is a VLA sized by the caller-supplied len —
 * large blocks risk stack overflow; confirm callers bound len. */
int
send_encrypted_data (EVP_CIPHER_CTX *ctx,
                     evutil_socket_t data_fd,
                     const void *buf, int len)
{
    char out_buf[len + ENC_BLOCK_SIZE];
    int out_len;

    if (EVP_EncryptUpdate (ctx,
                           (unsigned char *)out_buf, &out_len,
                           (unsigned char *)buf, len) == 0) {
        seaf_warning ("Failed to encrypt data.\n");
        return -1;
    }

    if (sendn (data_fd, out_buf, out_len) < 0) {
        seaf_warning ("Failed to write data: %s.\n",
                      evutil_socket_error_to_string(evutil_socket_geterror(data_fd)));
        return -1;
    }

    return 0;
}
/* Finalize the cipher stream (flushes the CBC padding block) and send
 * the remaining ciphertext, completing the frame. */
int
send_encrypted_data_frame_end (EVP_CIPHER_CTX *ctx,
                               evutil_socket_t data_fd)
{
    char out_buf[ENC_BLOCK_SIZE];
    int out_len;

    if (EVP_EncryptFinal_ex (ctx, (unsigned char *)out_buf, &out_len) == 0) {
        seaf_warning ("Failed to encrypt data.\n");
        return -1;
    }

    if (sendn (data_fd, out_buf, out_len) < 0) {
        seaf_warning ("Failed to write data: %s.\n",
                      evutil_socket_error_to_string(evutil_socket_geterror(data_fd)));
        return -1;
    }

    return 0;
}
/* Receiving frame */
static int
handle_frame_content (struct evbuffer *buf, FrameParser *parser)
{
char *frame;
EVP_CIPHER_CTX *ctx;
char *out;
int outlen, outlen2;
int ret = 0;
struct evbuffer *input = buf;
if (evbuffer_get_length (input) < parser->enc_frame_len)
return 0;
if (parser->version == 1)
blocktx_decrypt_init (&ctx, parser->key, parser->iv);
else if (parser->version == 2)
blocktx_decrypt_init (&ctx, parser->key_v2, parser->iv_v2);
frame = g_malloc (parser->enc_frame_len);
out = g_malloc (parser->enc_frame_len + ENC_BLOCK_SIZE);
evbuffer_remove (input, frame, parser->enc_frame_len);
if (EVP_DecryptUpdate (ctx,
(unsigned char *)out, &outlen,
(unsigned char *)frame,
parser->enc_frame_len) == 0) {
seaf_warning ("Failed to decrypt frame content.\n");
ret = -1;
goto out;
}
if (EVP_DecryptFinal_ex (ctx, (unsigned char *)(out + outlen), &outlen2) == 0)
{
seaf_warning ("Failed to decrypt frame content.\n");
ret = -1;
goto out;
}
ret = parser->content_cb (out, outlen + outlen2, parser->cbarg);
out:
g_free (frame);
g_free (out);
parser->enc_frame_len = 0;
EVP_CIPHER_CTX_free (ctx);
return ret;
}
/* Whole-frame entry point: read the 4-byte length header if we don't
 * have one yet, then decrypt and deliver the frame once it is complete.
 * Safe to call repeatedly as data trickles in. */
int
handle_one_frame (struct evbuffer *buf, FrameParser *parser)
{
    struct evbuffer *input = buf;

    if (!parser->enc_frame_len) {
        /* Read the length of the encrypted frame first. */
        if (evbuffer_get_length (input) < sizeof(int))
            return 0;

        int frame_len;
        evbuffer_remove (input, &frame_len, sizeof(int));
        parser->enc_frame_len = ntohl (frame_len);

        if (evbuffer_get_length (input) > 0)
            return handle_frame_content (buf, parser);

        return 0;
    } else {
        return handle_frame_content (buf, parser);
    }
}
/* Streaming path: decrypt whatever bytes are currently buffered and pass
 * them to parser->fragment_cb immediately.  When the running total
 * (parser->remain) shows the frame is complete, finalize the cipher,
 * deliver the last plaintext with the end-flag set, and reset the parser
 * for the next frame.  On any error the cipher context is torn down. */
static int
handle_frame_fragment_content (struct evbuffer *buf, FrameParser *parser)
{
    char *fragment = NULL, *out = NULL;
    int fragment_len, outlen;
    int ret = 0;

    struct evbuffer *input = buf;

    fragment_len = evbuffer_get_length (input);

    fragment = g_malloc (fragment_len);
    evbuffer_remove (input, fragment, fragment_len);

    out = g_malloc (fragment_len + ENC_BLOCK_SIZE);

    if (EVP_DecryptUpdate (parser->ctx,
                           (unsigned char *)out, &outlen,
                           (unsigned char *)fragment, fragment_len) == 0) {
        seaf_warning ("Failed to decrypt frame fragment.\n");
        ret = -1;
        goto out;
    }

    /* Third argument 0: not the final fragment of the frame. */
    ret = parser->fragment_cb (out, outlen, 0, parser->cbarg);
    if (ret < 0)
        goto out;

    parser->remain -= fragment_len;

    if (parser->remain <= 0) {
        /* Frame complete: flush CBC padding and deliver the tail. */
        if (EVP_DecryptFinal_ex (parser->ctx,
                                 (unsigned char *)out,
                                 &outlen) == 0) {
            seaf_warning ("Failed to decrypt frame fragment.\n");
            ret = -1;
            goto out;
        }

        /* Third argument 1: this is the end of the frame. */
        ret = parser->fragment_cb (out, outlen, 1, parser->cbarg);
        if (ret < 0)
            goto out;

        EVP_CIPHER_CTX_free (parser->ctx);
        parser->enc_init = FALSE;
        parser->enc_frame_len = 0;
    }

out:
    g_free (fragment);
    g_free (out);
    if (ret < 0) {
        /* Error: drop the context so the next frame starts clean. */
        EVP_CIPHER_CTX_free (parser->ctx);
        parser->enc_init = FALSE;
        parser->enc_frame_len = 0;
    }
    return ret;
}
/* Streaming entry point: read the 4-byte length header when starting a
 * new frame, set up a fresh decryption context for the negotiated
 * protocol version, then decrypt buffered data fragment by fragment.
 * NOTE(review): like handle_frame_content(), a version outside {1, 2}
 * leaves parser->ctx unset — confirm the handshake guarantees this. */
int
handle_frame_fragments (struct evbuffer *buf, FrameParser *parser)
{
    struct evbuffer *input = buf;

    if (!parser->enc_frame_len) {
        /* Read the length of the encrypted frame first. */
        if (evbuffer_get_length (input) < sizeof(int))
            return 0;

        int frame_len;
        evbuffer_remove (input, &frame_len, sizeof(int));
        parser->enc_frame_len = ntohl (frame_len);
        parser->remain = parser->enc_frame_len;

        if (parser->version == 1)
            blocktx_decrypt_init (&parser->ctx, parser->key, parser->iv);
        else if (parser->version == 2)
            blocktx_decrypt_init (&parser->ctx, parser->key_v2, parser->iv_v2);
        parser->enc_init = TRUE;

        if (evbuffer_get_length (input) > 0)
            return handle_frame_fragment_content (buf, parser);

        return 0;
    } else {
        return handle_frame_fragment_content (buf, parser);
    }
}
================================================
FILE: common/block-tx-utils.h
================================================
#ifndef BLOCK_TX_UTILS_H
#define BLOCK_TX_UTILS_H
#include
#include
#include
/* Common structures and constants shared by the client and server. */
/* We use AES 256 */
#define ENC_KEY_SIZE 32
#define ENC_BLOCK_SIZE 16
#define BLOCK_PROTOCOL_VERSION 2
/* Status codes carried in response headers. */
enum {
    STATUS_OK = 0,
    STATUS_VERSION_MISMATCH,
    STATUS_BAD_REQUEST,
    STATUS_ACCESS_DENIED,
    STATUS_INTERNAL_SERVER_ERROR,
    STATUS_NOT_FOUND,
};

/* Handshake request: proposed protocol version plus the encrypted
 * session key (key_len bytes follow the fixed header). */
struct _HandshakeRequest {
    gint32 version;
    gint32 key_len;
    char enc_session_key[0];
} __attribute__((__packed__));
typedef struct _HandshakeRequest HandshakeRequest;

/* Handshake response: status and the version the peer will speak. */
struct _HandshakeResponse {
    gint32 status;
    gint32 version;
} __attribute__((__packed__));
typedef struct _HandshakeResponse HandshakeResponse;

struct _AuthResponse {
    gint32 status;
} __attribute__((__packed__));
typedef struct _AuthResponse AuthResponse;

/* Block transfer commands. */
enum {
    REQUEST_COMMAND_GET = 0,
    REQUEST_COMMAND_PUT,
};

struct _RequestHeader {
    gint32 command;
    char block_id[40];      /* hex SHA-1; not NUL-terminated on the wire */
} __attribute__((__packed__));
typedef struct _RequestHeader RequestHeader;

struct _ResponseHeader {
    gint32 status;
} __attribute__((__packed__));
typedef struct _ResponseHeader ResponseHeader;
/* Utility functions for encryption. */
void
blocktx_generate_encrypt_key (unsigned char *session_key, int sk_len,
unsigned char *key, unsigned char *iv);
int
blocktx_encrypt_init (EVP_CIPHER_CTX **ctx,
const unsigned char *key,
const unsigned char *iv);
int
blocktx_decrypt_init (EVP_CIPHER_CTX **ctx,
const unsigned char *key,
const unsigned char *iv);
/*
* Encrypted data is sent in "frames".
* Format of a frame:
*
* length of data in the frame after encryption + encrypted data.
*
* Each frame can contain three types of contents:
* 1. Auth request or response;
* 2. Block request or response header;
* 3. Block content.
*/
int
send_encrypted_data_frame_begin (evutil_socket_t data_fd,
int frame_len);
int
send_encrypted_data (EVP_CIPHER_CTX *ctx,
evutil_socket_t data_fd,
const void *buf, int len);
int
send_encrypted_data_frame_end (EVP_CIPHER_CTX *ctx,
evutil_socket_t data_fd);
typedef int (*FrameContentCB) (char *, int, void *);
typedef int (*FrameFragmentCB) (char *, int, int, void *);
typedef struct _FrameParser {
int enc_frame_len;
unsigned char key[ENC_KEY_SIZE];
unsigned char iv[ENC_BLOCK_SIZE];
gboolean enc_init;
EVP_CIPHER_CTX *ctx;
unsigned char key_v2[ENC_KEY_SIZE];
unsigned char iv_v2[ENC_BLOCK_SIZE];
int version;
/* Used when parsing fragments */
int remain;
FrameContentCB content_cb;
FrameFragmentCB fragment_cb;
void *cbarg;
} FrameParser;
/* Handle entire frame all at once.
* parser->content_cb() will be called after the entire frame is read.
*/
int
handle_one_frame (struct evbuffer *buf, FrameParser *parser);
/* Handle a frame fragment by fragment.
* parser->fragment_cb() will be called when any amount data is read.
*/
int
handle_frame_fragments (struct evbuffer *buf, FrameParser *parser);
#endif
================================================
FILE: common/block.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

#ifndef BLOCK_H
#define BLOCK_H

/* NOTE(review): this header uses uint32_t and gboolean without including
 * anything itself — it relies on the including .c pulling in common.h
 * (or equivalents) first. */

typedef struct _BMetadata BlockMetadata;
typedef struct _BMetadata BMetadata;

/* Metadata of one content-addressed block: 40-hex-char id + NUL,
 * and the block size in bytes. */
struct _BMetadata {
    char id[41];
    uint32_t size;
};

/* Opaque block handle.
 */
typedef struct _BHandle BlockHandle;
typedef struct _BHandle BHandle;

/* Open mode for block handles. */
enum {
    BLOCK_READ,
    BLOCK_WRITE,
};

/* Per-block visitor callback used when enumerating blocks of a store. */
typedef gboolean (*SeafBlockFunc) (const char *store_id,
                                   int version,
                                   const char *block_id,
                                   void *user_data);

#endif
================================================
FILE: common/branch-mgr.c
================================================
#include "common.h"
#include "log.h"
#ifndef SEAFILE_SERVER
#include "db.h"
#else
#include "seaf-db.h"
#endif
#include "seafile-session.h"
#ifdef FULL_FEATURE
#include "notif-mgr.h"
#endif
#include "branch-mgr.h"
#define BRANCH_DB "branch.db"
/* Allocate a new branch object with a reference count of 1.
 * @repo_id must hold 36 chars and @commit_id 40 chars; both are copied
 * into fixed-size fields and NUL-terminated.  @name is duplicated. */
SeafBranch *
seaf_branch_new (const char *name, const char *repo_id, const char *commit_id)
{
    SeafBranch *b = g_new0 (SeafBranch, 1);

    b->name = g_strdup (name);

    memcpy (b->repo_id, repo_id, 36);
    b->repo_id[36] = '\0';

    memcpy (b->commit_id, commit_id, 40);
    b->commit_id[40] = '\0';

    b->ref = 1;
    return b;
}
/* Release a branch object unconditionally (ignores the refcount);
 * safe to call with NULL. */
void
seaf_branch_free (SeafBranch *branch)
{
    if (!branch)
        return;

    g_free (branch->name);
    g_free (branch);
}
/* Drop one reference on every branch in @blist, then free the list
 * container itself. */
void
seaf_branch_list_free (GList *blist)
{
    GList *p;

    for (p = blist; p != NULL; p = p->next)
        seaf_branch_unref ((SeafBranch *)p->data);

    g_list_free (blist);
}
/* Overwrite the branch's head commit id; @commit_id must hold 40 hex
 * chars, the copy is NUL-terminated. */
void
seaf_branch_set_commit (SeafBranch *branch, const char *commit_id)
{
    memcpy (branch->commit_id, commit_id, 40);
    branch->commit_id[40] = '\0';
}
/* Take one reference.  Not thread-safe: the counter is a plain int. */
void
seaf_branch_ref (SeafBranch *branch)
{
    branch->ref++;
}
/* Drop one reference; the branch is freed when the count reaches zero.
 * NULL is tolerated.  Not thread-safe: the counter is a plain int. */
void
seaf_branch_unref (SeafBranch *branch)
{
    if (branch == NULL)
        return;

    branch->ref--;
    if (branch->ref <= 0)
        seaf_branch_free (branch);
}
/* Private manager state: the client keeps its own sqlite database plus a
 * mutex serializing access to it; the server goes through the shared
 * seaf->db instead, so only the (unused) handle field remains there. */
struct _SeafBranchManagerPriv {
    sqlite3 *db;
#ifndef SEAFILE_SERVER
    pthread_mutex_t db_lock;
#endif
};
static int open_db (SeafBranchManager *mgr);

/* Allocate a branch manager bound to @seaf.  The database itself is not
 * opened here; call seaf_branch_manager_init() afterwards. */
SeafBranchManager *
seaf_branch_manager_new (struct _SeafileSession *seaf)
{
    SeafBranchManager *mgr;

    mgr = g_new0 (SeafBranchManager, 1);
    mgr->priv = g_new0 (SeafBranchManagerPriv, 1);
    mgr->seaf = seaf;

#ifndef SEAFILE_SERVER
    pthread_mutex_init (&mgr->priv->db_lock, NULL);
#endif

    return mgr;
}
/* Open/create the branch database and its schema.  Returns 0 or -1. */
int
seaf_branch_manager_init (SeafBranchManager *mgr)
{
    return open_db (mgr);
}
/* Create the Branch table (and index) if needed.
 *
 * Client build: opens a private sqlite file under seaf_dir.
 * Server FULL_FEATURE build: issues per-backend DDL on the shared DB.
 * Table creation is skipped when create_tables is off, except for PGSQL
 * where CREATE TABLE IF NOT EXISTS is always attempted.
 * Returns 0 on success, -1 on error. */
static int
open_db (SeafBranchManager *mgr)
{
    if (!mgr->seaf->create_tables && seaf_db_type (mgr->seaf->db) != SEAF_DB_TYPE_PGSQL)
        return 0;

#ifndef SEAFILE_SERVER
    char *db_path;
    const char *sql;

    db_path = g_build_filename (mgr->seaf->seaf_dir, BRANCH_DB, NULL);
    if (sqlite_open_db (db_path, &mgr->priv->db) < 0) {
        g_critical ("[Branch mgr] Failed to open branch db\n");
        g_free (db_path);
        return -1;
    }
    g_free (db_path);

    sql = "CREATE TABLE IF NOT EXISTS Branch ("
          "name TEXT, repo_id TEXT, commit_id TEXT);";
    if (sqlite_query_exec (mgr->priv->db, sql) < 0)
        return -1;

    sql = "CREATE INDEX IF NOT EXISTS branch_index ON Branch(repo_id, name);";
    if (sqlite_query_exec (mgr->priv->db, sql) < 0)
        return -1;
#elif defined FULL_FEATURE
    char *sql;

    /* NOTE(review): name VARCHAR(10) is tight — "fetch_head" is exactly
     * 10 chars; confirm no longer branch names are ever stored. */
    switch (seaf_db_type (mgr->seaf->db)) {
    case SEAF_DB_TYPE_MYSQL:
        sql = "CREATE TABLE IF NOT EXISTS Branch ("
              "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
              "name VARCHAR(10), repo_id CHAR(41), commit_id CHAR(41),"
              "UNIQUE INDEX(repo_id, name)) ENGINE = INNODB";
        if (seaf_db_query (mgr->seaf->db, sql) < 0)
            return -1;
        break;
    case SEAF_DB_TYPE_PGSQL:
        sql = "CREATE TABLE IF NOT EXISTS Branch ("
              "name VARCHAR(10), repo_id CHAR(40), commit_id CHAR(40),"
              "PRIMARY KEY (repo_id, name))";
        if (seaf_db_query (mgr->seaf->db, sql) < 0)
            return -1;
        break;
    case SEAF_DB_TYPE_SQLITE:
        sql = "CREATE TABLE IF NOT EXISTS Branch ("
              "name VARCHAR(10), repo_id CHAR(41), commit_id CHAR(41),"
              "PRIMARY KEY (repo_id, name))";
        if (seaf_db_query (mgr->seaf->db, sql) < 0)
            return -1;
        break;
    }
#endif

    return 0;
}
/* Insert or update (upsert) @branch in the Branch table.
 *
 * Client: SELECT-then-UPDATE/INSERT under the db mutex.
 * Server: PGSQL uses an explicit exists-check (no REPLACE support);
 * other backends use REPLACE INTO.
 * Returns 0 on success, -1 on DB error (client path always returns 0). */
int
seaf_branch_manager_add_branch (SeafBranchManager *mgr, SeafBranch *branch)
{
#ifndef SEAFILE_SERVER
    char sql[256];

    pthread_mutex_lock (&mgr->priv->db_lock);

    /* %Q quotes and escapes the argument as an SQL string literal. */
    sqlite3_snprintf (sizeof(sql), sql,
                      "SELECT 1 FROM Branch WHERE name=%Q and repo_id=%Q",
                      branch->name, branch->repo_id);
    if (sqlite_check_for_existence (mgr->priv->db, sql))
        sqlite3_snprintf (sizeof(sql), sql,
                          "UPDATE Branch SET commit_id=%Q WHERE "
                          "name=%Q and repo_id=%Q",
                          branch->commit_id, branch->name, branch->repo_id);
    else
        sqlite3_snprintf (sizeof(sql), sql,
                          "INSERT INTO Branch (name, repo_id, commit_id) VALUES (%Q, %Q, %Q)",
                          branch->name, branch->repo_id, branch->commit_id);

    sqlite_query_exec (mgr->priv->db, sql);
    pthread_mutex_unlock (&mgr->priv->db_lock);
    return 0;
#else
    char *sql;
    SeafDB *db = mgr->seaf->db;

    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) {
        gboolean exists, err;
        int rc;

        sql = "SELECT repo_id FROM Branch WHERE name=? AND repo_id=?";
        exists = seaf_db_statement_exists(db, sql, &err,
                                          2, "string", branch->name,
                                          "string", branch->repo_id);
        if (err)
            return -1;

        if (exists)
            rc = seaf_db_statement_query (db,
                                          "UPDATE Branch SET commit_id=? "
                                          "WHERE name=? AND repo_id=?",
                                          3, "string", branch->commit_id,
                                          "string", branch->name,
                                          "string", branch->repo_id);
        else
            rc = seaf_db_statement_query (db,
                                          "INSERT INTO Branch (name, repo_id, commit_id) VALUES (?, ?, ?)",
                                          3, "string", branch->name,
                                          "string", branch->repo_id,
                                          "string", branch->commit_id);
        if (rc < 0)
            return -1;
    } else {
        int rc = seaf_db_statement_query (db,
                                          "REPLACE INTO Branch (name, repo_id, commit_id) VALUES (?, ?, ?)",
                                          3, "string", branch->name,
                                          "string", branch->repo_id,
                                          "string", branch->commit_id);
        if (rc < 0)
            return -1;
    }

    return 0;
#endif
}
/* Delete branch @name of repo @repo_id from the Branch table.
 * Client path logs but still returns 0 on failure; server path
 * returns -1 on DB error. */
int
seaf_branch_manager_del_branch (SeafBranchManager *mgr,
                                const char *repo_id,
                                const char *name)
{
#ifndef SEAFILE_SERVER
    char *sql;

    pthread_mutex_lock (&mgr->priv->db_lock);

    sql = sqlite3_mprintf ("DELETE FROM Branch WHERE name = %Q AND "
                           "repo_id = '%s'", name, repo_id);
    if (sqlite_query_exec (mgr->priv->db, sql) < 0)
        seaf_warning ("Delete branch %s failed\n", name);
    sqlite3_free (sql);

    pthread_mutex_unlock (&mgr->priv->db_lock);
    return 0;
#else
    int rc = seaf_db_statement_query (mgr->seaf->db,
                                      "DELETE FROM Branch WHERE name=? AND repo_id=?",
                                      2, "string", name, "string", repo_id);
    if (rc < 0)
        return -1;
    return 0;
#endif
}
/* Unconditionally set the stored head commit of @branch.
 * For the compare-and-swap variant see
 * seaf_branch_manager_test_and_update_branch() (server only). */
int
seaf_branch_manager_update_branch (SeafBranchManager *mgr, SeafBranch *branch)
{
#ifndef SEAFILE_SERVER
    sqlite3 *db;
    char *sql;

    pthread_mutex_lock (&mgr->priv->db_lock);

    db = mgr->priv->db;
    sql = sqlite3_mprintf ("UPDATE Branch SET commit_id = %Q "
                           "WHERE name = %Q AND repo_id = %Q",
                           branch->commit_id, branch->name, branch->repo_id);
    sqlite_query_exec (db, sql);
    sqlite3_free (sql);

    pthread_mutex_unlock (&mgr->priv->db_lock);
    return 0;
#else
    int rc = seaf_db_statement_query (mgr->seaf->db,
                                      "UPDATE Branch SET commit_id = ? "
                                      "WHERE name = ? AND repo_id = ?",
                                      3, "string", branch->commit_id,
                                      "string", branch->name,
                                      "string", branch->repo_id);
    if (rc < 0)
        return -1;
    return 0;
#endif
}
#if defined( SEAFILE_SERVER ) && defined( FULL_FEATURE )
#include "mq-mgr.h"
/* SeafDB row callback: copy the first column (a 40-hex-char commit id)
 * into the caller-supplied buffer, which must hold at least 41 bytes.
 * Returns FALSE so only the first row is consumed. */
static gboolean
get_commit_id (SeafDBRow *row, void *data)
{
    char *out_commit_id = data;
    const char *commit_id;

    commit_id = seaf_db_row_get_column_text (row, 0);
    /* Bounded copy: the previous memcpy of 41 bytes over-read the source
     * when the stored id was shorter than 40 chars, and crashed outright
     * on a NULL column. */
    if (commit_id)
        g_strlcpy (out_commit_id, commit_id, 41);
    else
        out_commit_id[0] = '\0';

    return FALSE;
}
/* Publish a "repo-update" message on the server event channel (mq) so
 * background consumers (e.g. seafevents) can react to the new head. */
static void
publish_repo_update_event (const char *repo_id, const char *commit_id)
{
    json_t *msg = json_object ();
    char *msg_str = NULL;

    json_object_set_new (msg, "msg_type", json_string("repo-update"));
    json_object_set_new (msg, "repo_id", json_string(repo_id));
    json_object_set_new (msg, "commit_id", json_string(commit_id));

    msg_str = json_dumps (msg, JSON_PRESERVE_ORDER);

    seaf_mq_manager_publish_event (seaf->mq_mgr, SEAFILE_SERVER_CHANNEL_EVENT, msg_str);

    g_free (msg_str);
    json_decref (msg);
}
/* Send a "repo-update" event to the notification server (if one is
 * configured) so connected clients learn about the new head commit. */
static void
notify_repo_update (const char *repo_id, const char *commit_id)
{
    json_t *event = NULL;
    json_t *content = NULL;
    char *msg = NULL;

    event = json_object ();
    content = json_object ();
    json_object_set_new (event, "type", json_string("repo-update"));
    json_object_set_new (content, "repo_id", json_string(repo_id));
    json_object_set_new (content, "commit_id", json_string(commit_id));
    /* json_object_set_new steals the reference to content. */
    json_object_set_new (event, "content", content);

    msg = json_dumps (event, JSON_COMPACT);

    if (seaf->notif_mgr)
        seaf_notif_manager_send_event (seaf->notif_mgr, msg);

    json_decref (event);
    g_free (msg);
}
/* Post-update hook: refresh cached repo info, notify clients, and (for
 * non-virtual repos) publish the mq event.  Skipped entirely when
 * running in repair mode. */
static void
on_branch_updated (SeafBranchManager *mgr, SeafBranch *branch)
{
    if (seaf->is_repair)
        return;

    seaf_repo_manager_update_repo_info (seaf->repo_mgr, branch->repo_id, branch->commit_id);

    notify_repo_update(branch->repo_id, branch->commit_id);

    /* Virtual repos don't get a separate mq event. */
    if (seaf_repo_manager_is_virtual_repo (seaf->repo_mgr, branch->repo_id))
        return;

    publish_repo_update_event (branch->repo_id, branch->commit_id);
}
/* SeafDB row callback: duplicate the first column (last GC id) into the
 * caller's char* slot; caller owns the returned string. */
static gboolean
get_gc_id (SeafDBRow *row, void *data)
{
    char **out_gc_id = data;

    *out_gc_id = g_strdup(seaf_db_row_get_column_text (row, 0));

    return FALSE;
}
/* Atomically advance @branch to its new head iff the stored head still
 * equals @old_commit_id (optimistic compare-and-swap in one DB
 * transaction).
 *
 * When @check_gc is set, the repo's (or @origin_repo_id's, for virtual
 * repos) last GC id is read FOR UPDATE and compared with @last_gc_id;
 * a mismatch means a GC ran concurrently: *gc_conflict is set and the
 * transaction is rolled back.
 *
 * Returns 0 on success; -1 on CAS failure, GC conflict, or DB error.
 * On success on_branch_updated() fires the update notifications. */
int
seaf_branch_manager_test_and_update_branch (SeafBranchManager *mgr,
                                            SeafBranch *branch,
                                            const char *old_commit_id,
                                            gboolean check_gc,
                                            const char *last_gc_id,
                                            const char *origin_repo_id,
                                            gboolean *gc_conflict)
{
    SeafDBTrans *trans;
    char *sql;
    char commit_id[41] = { 0 };
    char *gc_id = NULL;

    if (check_gc)
        *gc_conflict = FALSE;

    trans = seaf_db_begin_transaction (mgr->seaf->db);
    if (!trans)
        return -1;

    if (check_gc) {
        /* FOR UPDATE locks the GCID row for the rest of the transaction. */
        sql = "SELECT gc_id FROM GCID WHERE repo_id = ? FOR UPDATE";
        if (!origin_repo_id) {
            if (seaf_db_trans_foreach_selected_row (trans, sql,
                                                    get_gc_id, &gc_id,
                                                    1, "string", branch->repo_id) < 0) {
                seaf_db_rollback (trans);
                seaf_db_trans_close (trans);
                return -1;
            }
        }
        else {
            /* Virtual repo: GC state lives under the origin repo's id. */
            if (seaf_db_trans_foreach_selected_row (trans, sql,
                                                    get_gc_id, &gc_id,
                                                    1, "string", origin_repo_id) < 0) {
                seaf_db_rollback (trans);
                seaf_db_trans_close (trans);
                return -1;
            }
        }
        /* g_strcmp0 treats NULL == NULL as equal (no GC ever recorded). */
        if (g_strcmp0 (last_gc_id, gc_id) != 0) {
            seaf_warning ("Head branch update for repo %s conflicts with GC.\n",
                          branch->repo_id);
            seaf_db_rollback (trans);
            seaf_db_trans_close (trans);
            *gc_conflict = TRUE;
            g_free (gc_id);
            return -1;
        }
        g_free (gc_id);
    }

    switch (seaf_db_type (mgr->seaf->db)) {
    case SEAF_DB_TYPE_MYSQL:
    case SEAF_DB_TYPE_PGSQL:
        sql = "SELECT commit_id FROM Branch WHERE name=? "
              "AND repo_id=? FOR UPDATE";
        break;
    case SEAF_DB_TYPE_SQLITE:
        /* sqlite has no row locks; the transaction itself serializes. */
        sql = "SELECT commit_id FROM Branch WHERE name=? "
              "AND repo_id=?";
        break;
    default:
        g_return_val_if_reached (-1);
    }
    if (seaf_db_trans_foreach_selected_row (trans, sql,
                                            get_commit_id, commit_id,
                                            2, "string", branch->name,
                                            "string", branch->repo_id) < 0) {
        seaf_db_rollback (trans);
        seaf_db_trans_close (trans);
        return -1;
    }
    /* CAS check: someone else moved the head since we read it. */
    if (strcmp (old_commit_id, commit_id) != 0) {
        seaf_db_rollback (trans);
        seaf_db_trans_close (trans);
        return -1;
    }

    sql = "UPDATE Branch SET commit_id = ? "
          "WHERE name = ? AND repo_id = ?";
    if (seaf_db_trans_query (trans, sql, 3, "string", branch->commit_id,
                             "string", branch->name,
                             "string", branch->repo_id) < 0) {
        seaf_db_rollback (trans);
        seaf_db_trans_close (trans);
        return -1;
    }

    if (seaf_db_commit (trans) < 0) {
        seaf_db_rollback (trans);
        seaf_db_trans_close (trans);
        return -1;
    }

    seaf_db_trans_close (trans);

    on_branch_updated (mgr, branch);

    return 0;
}
#endif
#ifndef SEAFILE_SERVER
/* Client: look up branch @name of @repo_id in the private sqlite db.
 * Returns a new SeafBranch (caller unrefs) or NULL if absent/error. */
static SeafBranch *
real_get_branch (SeafBranchManager *mgr,
                 const char *repo_id,
                 const char *name)
{
    SeafBranch *branch = NULL;
    sqlite3_stmt *stmt;
    sqlite3 *db;
    char *sql;
    int result;

    pthread_mutex_lock (&mgr->priv->db_lock);

    db = mgr->priv->db;
    sql = sqlite3_mprintf ("SELECT commit_id FROM Branch "
                           "WHERE name = %Q and repo_id='%s'",
                           name, repo_id);
    if (!(stmt = sqlite_query_prepare (db, sql))) {
        seaf_warning ("[Branch mgr] Couldn't prepare query %s\n", sql);
        sqlite3_free (sql);
        pthread_mutex_unlock (&mgr->priv->db_lock);
        return NULL;
    }
    sqlite3_free (sql);
    result = sqlite3_step (stmt);
    if (result == SQLITE_ROW) {
        char *commit_id = (char *)sqlite3_column_text (stmt, 0);
        branch = seaf_branch_new (name, repo_id, commit_id);
        pthread_mutex_unlock (&mgr->priv->db_lock);
        sqlite3_finalize (stmt);
        return branch;
    } else if (result == SQLITE_ERROR) {
        /* NOTE(review): this is a step error, not a prepare error — the
         * message text is misleading. */
        const char *str = sqlite3_errmsg (db);
        seaf_warning ("Couldn't prepare query, error: %d->'%s'\n",
                      result, str ? str : "no error given");
    }

    sqlite3_finalize (stmt);
    pthread_mutex_unlock (&mgr->priv->db_lock);
    return NULL;
}
/* Look up a branch.  "fetch_head" is an alias resolved to "local",
 * falling back to "master" (client LAN-sync behavior). */
SeafBranch *
seaf_branch_manager_get_branch (SeafBranchManager *mgr,
                                const char *repo_id,
                                const char *name)
{
    if (strcmp (name, "fetch_head") != 0)
        return real_get_branch (mgr, repo_id, name);

    SeafBranch *ret = real_get_branch (mgr, repo_id, "local");
    if (ret == NULL)
        ret = real_get_branch (mgr, repo_id, "master");
    return ret;
}
#else
/* SeafDB row callback: copy the commit id column into the caller's
 * 41-byte buffer; only the first row is consumed. */
static gboolean
get_branch (SeafDBRow *row, void *vid)
{
    char *ret = vid;
    const char *commit_id;

    commit_id = seaf_db_row_get_column_text (row, 0);
    /* Bounded copy (always NUL-terminates): the previous memcpy of 41
     * bytes over-read the source for ids shorter than 40 chars and
     * crashed on a NULL column. */
    if (commit_id)
        g_strlcpy (ret, commit_id, 41);
    else
        ret[0] = '\0';

    return FALSE;
}
/* Server: fetch branch @name of @repo_id from the Branch table.
 * Returns a new SeafBranch (caller unrefs) or NULL when the branch does
 * not exist or a DB error occurred. */
static SeafBranch *
real_get_branch (SeafBranchManager *mgr,
                 const char *repo_id,
                 const char *name)
{
    char commit_id[41];
    char *sql;

    /* Empty string doubles as the "not found" sentinel below. */
    commit_id[0] = 0;
    sql = "SELECT commit_id FROM Branch WHERE name=? AND repo_id=?";
    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,
                                       get_branch, commit_id,
                                       2, "string", name, "string", repo_id) < 0) {
        seaf_warning ("[branch mgr] DB error when get branch %s.\n", name);
        return NULL;
    }

    if (commit_id[0] == 0)
        return NULL;

    return seaf_branch_new (name, repo_id, commit_id);
}
/* Look up a branch.  On the server "fetch_head" is simply an alias for
 * "master". */
SeafBranch *
seaf_branch_manager_get_branch (SeafBranchManager *mgr,
                                const char *repo_id,
                                const char *name)
{
    const char *real_name =
        (strcmp (name, "fetch_head") == 0) ? "master" : name;

    return real_get_branch (mgr, repo_id, real_name);
}
#endif /* not SEAFILE_SERVER */
/* Return TRUE iff branch @name of @repo_id exists.  On the server path
 * a DB error is indistinguishable from "not found" (db_err is ignored). */
gboolean
seaf_branch_manager_branch_exists (SeafBranchManager *mgr,
                                   const char *repo_id,
                                   const char *name)
{
#ifndef SEAFILE_SERVER
    char *sql;
    gboolean ret;

    pthread_mutex_lock (&mgr->priv->db_lock);

    sql = sqlite3_mprintf ("SELECT name FROM Branch WHERE name = %Q "
                           "AND repo_id='%s'", name, repo_id);
    ret = sqlite_check_for_existence (mgr->priv->db, sql);
    sqlite3_free (sql);

    pthread_mutex_unlock (&mgr->priv->db_lock);
    return ret;
#else
    gboolean db_err = FALSE;

    return seaf_db_statement_exists (mgr->seaf->db,
                                     "SELECT name FROM Branch WHERE name=? "
                                     "AND repo_id=?", &db_err,
                                     2, "string", name, "string", repo_id);
#endif
}
#ifndef SEAFILE_SERVER
/* Client: return all branches of @repo_id as a list of SeafBranch in
 * table order; caller releases with seaf_branch_list_free().
 * Returns NULL on error (or when the repo has no branches). */
GList *
seaf_branch_manager_get_branch_list (SeafBranchManager *mgr,
                                     const char *repo_id)
{
    sqlite3 *db = mgr->priv->db;
    int result;
    sqlite3_stmt *stmt;
    char sql[256];
    char *name;
    char *commit_id;
    GList *ret = NULL;
    SeafBranch *branch;

    /* NOTE(review): repo_id is interpolated unescaped — presumably always
     * an internally generated UUID; verify no caller passes external input. */
    snprintf (sql, 256, "SELECT name, commit_id FROM branch WHERE repo_id ='%s'",
              repo_id);

    pthread_mutex_lock (&mgr->priv->db_lock);

    if ( !(stmt = sqlite_query_prepare(db, sql)) ) {
        pthread_mutex_unlock (&mgr->priv->db_lock);
        return NULL;
    }

    while (1) {
        result = sqlite3_step (stmt);
        if (result == SQLITE_ROW) {
            name = (char *)sqlite3_column_text(stmt, 0);
            commit_id = (char *)sqlite3_column_text(stmt, 1);
            branch = seaf_branch_new (name, repo_id, commit_id);
            /* prepend + final reverse keeps the list in row order. */
            ret = g_list_prepend (ret, branch);
        }
        if (result == SQLITE_DONE)
            break;
        if (result == SQLITE_ERROR) {
            const gchar *str = sqlite3_errmsg (db);
            seaf_warning ("Couldn't prepare query, error: %d->'%s'\n",
                          result, str ? str : "no error given");
            sqlite3_finalize (stmt);
            seaf_branch_list_free (ret);
            pthread_mutex_unlock (&mgr->priv->db_lock);
            return NULL;
        }
    }
    sqlite3_finalize (stmt);
    pthread_mutex_unlock (&mgr->priv->db_lock);
    return g_list_reverse(ret);
}
#else
/* SeafDB row callback: build a SeafBranch from (name, repo_id,
 * commit_id) columns and prepend it to the result list.  Returns TRUE
 * to keep iterating over all rows. */
static gboolean
get_branches (SeafDBRow *row, void *vplist)
{
    GList **plist = vplist;
    const char *commit_id;
    const char *name;
    const char *repo_id;
    SeafBranch *branch;

    name = seaf_db_row_get_column_text (row, 0);
    repo_id = seaf_db_row_get_column_text (row, 1);
    commit_id = seaf_db_row_get_column_text (row, 2);

    branch = seaf_branch_new (name, repo_id, commit_id);
    *plist = g_list_prepend (*plist, branch);

    return TRUE;
}
/* Server: return all branches of @repo_id as a list of SeafBranch;
 * caller releases with seaf_branch_list_free().  NULL on DB error. */
GList *
seaf_branch_manager_get_branch_list (SeafBranchManager *mgr,
                                     const char *repo_id)
{
    GList *branches = NULL;
    char *sql = "SELECT name, repo_id, commit_id FROM Branch WHERE repo_id=?";

    int rc = seaf_db_statement_foreach_row (mgr->seaf->db, sql,
                                            get_branches, &branches,
                                            1, "string", repo_id);
    if (rc < 0) {
        seaf_warning ("[branch mgr] DB error when get branch list.\n");
        return NULL;
    }

    return branches;
}
#endif
================================================
FILE: common/branch-mgr.h
================================================
#ifndef SEAF_BRANCH_MGR_H
#define SEAF_BRANCH_MGR_H

#include "commit-mgr.h"
#define NO_BRANCH "-"

typedef struct _SeafBranch SeafBranch;

/* A named head pointer of a repo: reference-counted, with fixed-size
 * id fields (36-char repo uuid, 40-char commit sha1, both + NUL). */
struct _SeafBranch {
    int   ref;
    char *name;
    char  repo_id[37];
    char  commit_id[41];
};

SeafBranch *seaf_branch_new (const char *name,
                             const char *repo_id,
                             const char *commit_id);
void seaf_branch_free (SeafBranch *branch);
void seaf_branch_set_commit (SeafBranch *branch, const char *commit_id);

void seaf_branch_ref (SeafBranch *branch);
void seaf_branch_unref (SeafBranch *branch);

typedef struct _SeafBranchManager SeafBranchManager;
typedef struct _SeafBranchManagerPriv SeafBranchManagerPriv;

struct _SeafileSession;

struct _SeafBranchManager {
    struct _SeafileSession *seaf;

    SeafBranchManagerPriv *priv;
};

SeafBranchManager *seaf_branch_manager_new (struct _SeafileSession *seaf);
int seaf_branch_manager_init (SeafBranchManager *mgr);

/* Upsert @branch into the database. */
int
seaf_branch_manager_add_branch (SeafBranchManager *mgr, SeafBranch *branch);

int
seaf_branch_manager_del_branch (SeafBranchManager *mgr,
                                const char *repo_id,
                                const char *name);

/* Unref every branch in @blist and free the list. */
void
seaf_branch_list_free (GList *blist);

/* Unconditionally store @branch's commit id. */
int
seaf_branch_manager_update_branch (SeafBranchManager *mgr,
                                   SeafBranch *branch);

#ifdef SEAFILE_SERVER
/**
 * Atomically test whether the current head commit id on @branch
 * is the same as @old_commit_id and update branch in db.
 */
int
seaf_branch_manager_test_and_update_branch (SeafBranchManager *mgr,
                                            SeafBranch *branch,
                                            const char *old_commit_id,
                                            gboolean check_gc,
                                            const char *last_gc_id,
                                            const char *origin_repo_id,
                                            gboolean *gc_conflict);
#endif

SeafBranch *
seaf_branch_manager_get_branch (SeafBranchManager *mgr,
                                const char *repo_id,
                                const char *name);


gboolean
seaf_branch_manager_branch_exists (SeafBranchManager *mgr,
                                   const char *repo_id,
                                   const char *name);

GList *
seaf_branch_manager_get_branch_list (SeafBranchManager *mgr,
                                     const char *repo_id);

/* NOTE(review): no definition for this appears in branch-mgr.c in this
 * view — possibly implemented elsewhere or a stale declaration. */
gint64
seaf_branch_manager_calculate_branch_size (SeafBranchManager *mgr,
                                           const char *repo_id,
                                           const char *commit_id);
#endif /* SEAF_BRANCH_MGR_H */
================================================
FILE: common/cdc/Makefile.am
================================================
# Build rules for the CDC (content-defined chunking) convenience library,
# linked into the seafile binaries; not installed on its own.
AM_CFLAGS = -I$(top_srcdir)/common -I$(top_srcdir)/lib \
	-Wall @GLIB2_CFLAGS@ @MSVC_CFLAGS@

noinst_LTLIBRARIES = libcdc.la

noinst_HEADERS = cdc.h rabin-checksum.h

libcdc_la_SOURCES = cdc.c rabin-checksum.c
# Fail the link on undefined symbols.
libcdc_la_LDFLAGS = -Wl,-z -Wl,defs
libcdc_la_LIBADD = @SSL_LIBS@ @GLIB2_LIBS@ \
	$(top_builddir)/lib/libseafile_common.la
================================================
FILE: common/cdc/cdc.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#include "common.h"
#include "log.h"
#include
#include
#include
#include
#include
#include
#include
#include
#include "utils.h"
#include "cdc.h"
#include "../seafile-crypt.h"
#include "rabin-checksum.h"
#define finger rabin_checksum
#define rolling_finger rabin_rolling_checksum
#define BLOCK_SZ (1024*1024*1)
#define BLOCK_MIN_SZ (1024*256)
#define BLOCK_MAX_SZ (1024*1024*4)
#define BLOCK_WIN_SZ 48
#define NAME_MAX_SZ 4096
#define BREAK_VALUE 0x0013 ///0x0513
#define READ_SIZE 1024 * 4
#define BYTE_TO_HEX(b) (((b)>=10)?('a'+b-10):('0'+b))
static int default_write_chunk (CDCDescriptor *chunk_descr)
{
char filename[NAME_MAX_SZ];
char chksum_str[CHECKSUM_LENGTH *2 + 1];
int fd_chunk, ret;
memset(chksum_str, 0, sizeof(chksum_str));
rawdata_to_hex (chunk_descr->checksum, chksum_str, CHECKSUM_LENGTH);
snprintf (filename, NAME_MAX_SZ, "./%s", chksum_str);
fd_chunk = g_open (filename, O_RDWR | O_CREAT | O_BINARY, 0644);
if (fd_chunk < 0)
return -1;
ret = writen (fd_chunk, chunk_descr->block_buf, chunk_descr->len);
close (fd_chunk);
return ret;
}
/* Prepare @file_descr for chunking a file of @file_size bytes: fill in
 * default chunk-size limits and writer, and allocate the array that
 * collects one CHECKSUM_LENGTH-byte checksum per produced block.
 * Returns 0 on success, -1 when the checksum array cannot be allocated
 * (@fd is currently unused; kept for interface stability). */
static int init_cdc_file_descriptor (int fd,
                                     uint64_t file_size,
                                     CDCFileDescriptor *file_descr)
{
    int max_block_nr = 0;
    int block_min_sz = 0;

    file_descr->block_nr = 0;

    if (file_descr->block_min_sz <= 0)
        file_descr->block_min_sz = BLOCK_MIN_SZ;
    if (file_descr->block_max_sz <= 0)
        file_descr->block_max_sz = BLOCK_MAX_SZ;
    if (file_descr->block_sz <= 0)
        file_descr->block_sz = BLOCK_SZ;

    if (file_descr->write_block == NULL)
        file_descr->write_block = (WriteblockFunc)default_write_chunk;

    block_min_sz = file_descr->block_min_sz;
    max_block_nr = ((file_size + block_min_sz - 1) / block_min_sz);

    if (max_block_nr > 0) {
        /* calloc(count, size): count first; this order also gets the
         * multiplication overflow-checked by calloc itself. */
        file_descr->blk_sha1s = calloc (max_block_nr, CHECKSUM_LENGTH);
        if (file_descr->blk_sha1s == NULL) {
            /* Leaving max_block_nr at 0 makes the chunking loop bail out
             * safely instead of writing through a NULL array. */
            file_descr->max_block_nr = 0;
            return -1;
        }
    } else {
        /* Empty file: no blocks will be produced, no array needed. */
        file_descr->blk_sha1s = NULL;
    }
    file_descr->max_block_nr = max_block_nr;

    return 0;
}
/* Emit one chunk of block_sz bytes from the front of `buf`:
 * write it via the pluggable writer, record its checksum (both in the
 * per-file array and the whole-file SHA1), advance `offset`, and slide
 * the unconsumed bytes to the front of the buffer.
 * Uses locals of file_chunk_cdc: buf, tail, cur, offset, ret,
 * chunk_descr, file_descr, file_ctx, crypt.
 * Fix: dropped the stray ';' after while(0) — with it, a second
 * semicolon at the call site would break use inside if/else. */
#define WRITE_CDC_BLOCK(block_sz, write_data)                \
do {                                                         \
    int _block_sz = (block_sz);                              \
    chunk_descr.len = _block_sz;                             \
    chunk_descr.offset = offset;                             \
    ret = file_descr->write_block (file_descr->repo_id,      \
                                   file_descr->version,      \
                                   &chunk_descr,             \
                                   crypt, chunk_descr.checksum, \
                                   (write_data));            \
    if (ret < 0) {                                           \
        free (buf);                                          \
        g_warning ("CDC: failed to write chunk.\n");         \
        return -1;                                           \
    }                                                        \
    memcpy (file_descr->blk_sha1s +                          \
            file_descr->block_nr * CHECKSUM_LENGTH,          \
            chunk_descr.checksum, CHECKSUM_LENGTH);          \
    SHA1_Update (&file_ctx, chunk_descr.checksum, 20);       \
    file_descr->block_nr++;                                  \
    offset += _block_sz;                                     \
                                                             \
    memmove (buf, buf + _block_sz, tail - _block_sz);        \
    tail = tail - _block_sz;                                 \
    cur = 0;                                                 \
} while(0)
/* content-defined chunking */
int file_chunk_cdc(int fd_src,
CDCFileDescriptor *file_descr,
SeafileCrypt *crypt,
gboolean write_data,
gint64 *indexed)
{
char *buf;
uint32_t buf_sz;
SHA_CTX file_ctx;
CDCDescriptor chunk_descr;
SHA1_Init (&file_ctx);
SeafStat sb;
if (seaf_fstat (fd_src, &sb) < 0) {
seaf_warning ("CDC: failed to stat: %s.\n", strerror(errno));
return -1;
}
uint64_t expected_size = sb.st_size;
init_cdc_file_descriptor (fd_src, expected_size, file_descr);
uint32_t block_min_sz = file_descr->block_min_sz;
uint32_t block_mask = file_descr->block_sz - 1;
int fingerprint = 0;
int offset = 0;
int ret = 0;
int tail, cur, rsize;
buf_sz = file_descr->block_max_sz;
buf = chunk_descr.block_buf = malloc (buf_sz);
if (!buf)
return -1;
/* buf: a fix-sized buffer.
* cur: data behind (inclusive) this offset has been scanned.
* cur + 1 is the bytes that has been scanned.
* tail: length of data loaded into memory. buf[tail] is invalid.
*/
tail = cur = 0;
while (1) {
if (tail < block_min_sz) {
rsize = block_min_sz - tail + READ_SIZE;
} else {
rsize = (buf_sz - tail < READ_SIZE) ? (buf_sz - tail) : READ_SIZE;
}
ret = readn (fd_src, buf + tail, rsize);
if (ret < 0) {
seaf_warning ("CDC: failed to read: %s.\n", strerror(errno));
free (buf);
return -1;
}
tail += ret;
file_descr->file_size += ret;
if (file_descr->file_size > expected_size) {
seaf_warning ("File size changed while chunking.\n");
free (buf);
return -1;
}
/* We've read all the data in this file. Output the block immediately
* in two cases:
* 1. The data left in the file is less than block_min_sz;
* 2. We cannot find the break value until the end of this file.
*/
if (tail < block_min_sz || cur >= tail) {
if (tail > 0) {
if (file_descr->block_nr == file_descr->max_block_nr) {
seaf_warning ("Block id array is not large enough, bail out.\n");
free (buf);
return -1;
}
gint64 idx_size = tail;
WRITE_CDC_BLOCK (tail, write_data);
if (indexed)
*indexed += idx_size;
}
break;
}
/*
* A block is at least of size block_min_sz.
*/
if (cur < block_min_sz - 1)
cur = block_min_sz - 1;
while (cur < tail) {
fingerprint = (cur == block_min_sz - 1) ?
finger(buf + cur - BLOCK_WIN_SZ + 1, BLOCK_WIN_SZ) :
rolling_finger (fingerprint, BLOCK_WIN_SZ,
*(buf+cur-BLOCK_WIN_SZ), *(buf + cur));
/* get a chunk, write block info to chunk file */
if (((fingerprint & block_mask) == ((BREAK_VALUE & block_mask)))
|| cur + 1 >= file_descr->block_max_sz)
{
if (file_descr->block_nr == file_descr->max_block_nr) {
seaf_warning ("Block id array is not large enough, bail out.\n");
free (buf);
return -1;
}
gint64 idx_size = cur + 1;
WRITE_CDC_BLOCK (cur + 1, write_data);
if (indexed)
*indexed += idx_size;
break;
} else {
cur ++;
}
}
}
SHA1_Final (file_descr->file_sum, &file_ctx);
free (buf);
return 0;
}
/* Convenience wrapper: open @filename read-only (binary) and run CDC
 * chunking on it; see file_chunk_cdc() for the semantics. */
int filename_chunk_cdc(const char *filename,
                       CDCFileDescriptor *file_descr,
                       SeafileCrypt *crypt,
                       gboolean write_data,
                       gint64 *indexed)
{
    int ret;
    int fd = seaf_util_open (filename, O_RDONLY | O_BINARY);

    if (fd < 0) {
        seaf_warning ("CDC: failed to open %s.\n", filename);
        return -1;
    }

    ret = file_chunk_cdc (fd, file_descr, crypt, write_data, indexed);
    close (fd);

    return ret;
}
/* One-time initialization of the Rabin rolling-hash tables for the CDC
 * sliding window; call before any *_chunk_cdc().
 * (Signature tightened from the obsolescent `()` to `(void)`.) */
void cdc_init (void)
{
    rabin_init (BLOCK_WIN_SZ);
}
================================================
FILE: common/cdc/cdc.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

#ifndef _CDC_H
#define _CDC_H

/* NOTE(review): the targets of the includes below were stripped during
 * extraction (likely <stdint.h>/<glib.h> and, in the #else branch,
 * an SHA-1 header) — restore before building. */
#include
#include

#ifdef HAVE_MD5
#include "md5.h"
#define get_checksum md5
#define CHECKSUM_LENGTH 16
#else
#include
#define get_checksum sha1
#define CHECKSUM_LENGTH 20
#endif

#ifndef O_BINARY
#define O_BINARY 0
#endif

struct _CDCFileDescriptor;
struct _CDCDescriptor;
struct SeafileCrypt;

/* Pluggable per-chunk writer: receives the chunk descriptor (buffer,
 * length, offset, checksum slot) and returns < 0 on failure. */
typedef int (*WriteblockFunc)(const char *repo_id,
                              int version,
                              struct _CDCDescriptor *chunk_descr,
                              struct SeafileCrypt *crypt,
                              uint8_t *checksum,
                              gboolean write_data);

/* define chunk file header and block entry */
typedef struct _CDCFileDescriptor {
    uint32_t block_min_sz;      /* lower bound on chunk size */
    uint32_t block_max_sz;      /* upper bound on chunk size */
    uint32_t block_sz;          /* target average size (power of two) */
    uint64_t file_size;         /* bytes consumed so far */

    uint32_t block_nr;          /* chunks produced so far */
    uint8_t *blk_sha1s;         /* CHECKSUM_LENGTH bytes per chunk */
    int max_block_nr;           /* capacity of blk_sha1s */
    uint8_t  file_sum[CHECKSUM_LENGTH];  /* whole-file SHA1 */

    WriteblockFunc write_block;

    char repo_id[37];
    int version;
} CDCFileDescriptor;

/* One chunk in flight: its position, length, checksum and data buffer. */
typedef struct _CDCDescriptor {
    uint64_t offset;
    uint32_t len;
    uint8_t  checksum[CHECKSUM_LENGTH];
    char    *block_buf;
    int result;
} CDCDescriptor;

int file_chunk_cdc(int fd_src,
                   CDCFileDescriptor *file_descr,
                   struct SeafileCrypt *crypt,
                   gboolean write_data,
                   gint64 *indexed);

int filename_chunk_cdc(const char *filename,
                       CDCFileDescriptor *file_descr,
                       struct SeafileCrypt *crypt,
                       gboolean write_data,
                       gint64 *indexed);

void cdc_init ();

#endif
================================================
FILE: common/cdc/rabin-checksum.c
================================================
#include
#include <stdint.h>

#include "rabin-checksum.h"
#ifdef WIN32
/* NOTE(review): this include's target was stripped during extraction
 * (probably <stdint.h>) — restore before building on Windows. */
#include
/* BSD-style integer aliases, absent from MSVC toolchains. */
#ifndef u_int
typedef unsigned int u_int;
#endif

#ifndef u_char
typedef unsigned char u_char;
#endif

#ifndef u_short
typedef unsigned short u_short;
#endif

#ifndef u_long
typedef unsigned long u_long;
#endif

#ifndef u_int16_t
typedef uint16_t u_int16_t;
#endif

#ifndef u_int32_t
typedef uint32_t u_int32_t;
#endif

#ifndef u_int64_t
typedef uint64_t u_int64_t;
#endif

#endif

#define INT64(n) n##LL
#define MSB64 INT64(0x8000000000000000)

/* Irreducible polynomial defining the Rabin fingerprint field (GF(2)). */
static u_int64_t poly = 0xbfe6b8a5bf378d83LL;
/* T: reduction table used when appending one byte (filled by calcT). */
static u_int64_t T[256];
/* U: per-byte table for removing the oldest byte of a fixed-size
 * window (filled by calcU). */
static u_int64_t U[256];
/* Fingerprint width minus 8; index shift used by append8(). */
static int shift;
/* Highest bit set in a byte */
static const char bytemsb[0x100] = {
    0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};

/* Find last set (most significant bit), 1-based; returns 0 for v == 0.
 * Fix: switched from the non-standard BSD u_int/u_int32_t aliases
 * (unavailable under strict C11 on some libcs) to C99 <stdint.h> types,
 * and widened fls64's return from char to int — both functions are
 * file-local, so no external interface changes. */
static inline unsigned fls32 (uint32_t v)
{
    unsigned base = 0;

    if (v & 0xffff0000u) { base += 16; v >>= 16; }
    if (v & 0xff00u)     { base += 8;  v >>= 8;  }

    return base + bytemsb[v];
}

/* 64-bit variant: 1-based index of the highest set bit; 0 for v == 0. */
static inline int fls64 (uint64_t v)
{
    uint32_t high = (uint32_t)(v >> 32);

    return high ? (int)(32 + fls32 (high)) : (int)fls32 ((uint32_t) v);
}
/* Compute (nh * 2^64 + nl) mod d as carry-less (GF(2)) polynomial
 * division.  @d must be non-zero; its degree k comes from fls64. */
u_int64_t polymod (u_int64_t nh, u_int64_t nl, u_int64_t d)
{
    int i = 0;
    int k = fls64 (d) - 1;   /* degree of d */
    d <<= 63 - k;            /* align d's leading term to bit 63 */

    /* Fold the high word into the low word, bit by bit. */
    if (nh) {
        if (nh & MSB64)
            nh ^= d;

        for (i = 62; i >= 0; i--)
            if (nh & ((u_int64_t) 1) << i) {
                nh ^= d >> (63 - i);
                nl ^= d << (i + 1);
            }
    }
    /* Reduce the remaining low word down to degree < k. */
    for (i = 63; i >= k; i--)
    {
        if (nl & INT64 (1) << i)
            nl ^= d >> (63 - i);
    }

    return nl;
}
/* Carry-less (GF(2)) 64x64 -> 128-bit multiply of x and y; the high
 * half is stored to *php and the low half to *plp (either may be NULL). */
void polymult (u_int64_t *php, u_int64_t *plp, u_int64_t x, u_int64_t y)
{
    int i;
    u_int64_t ph = 0, pl = 0;

    if (x & 1)
        pl = y;
    for (i = 1; i < 64; i++)
        if (x & (INT64 (1) << i)) {
            /* XOR-accumulate y shifted by i, split across two words. */
            ph ^= y >> (64 - i);
            pl ^= y << i;
        }
    if (php)
        *php = ph;
    if (plp)
        *plp = pl;
}
/* Carry-less multiply of x and y followed by reduction mod d:
 * (x * y) mod d in GF(2) polynomial arithmetic. */
u_int64_t polymmult (u_int64_t x, u_int64_t y, u_int64_t d)
{
    u_int64_t h, l;

    polymult (&h, &l, x, y);
    return polymod (h, l, d);
}
/* Slide one byte m into fingerprint p: shift left by 8, OR in m, and
 * reduce the overflowing top byte via the precomputed T table. */
static u_int64_t append8 (u_int64_t p, u_char m)
{
    return ((p << 8) | m) ^ T[p >> shift];
}
/* Precompute T for append8(): for each possible top byte j,
 * T[j] combines j's reduction mod poly with j restored at the top bits,
 * so that the XOR in append8 both clears and reduces the overflow. */
static void calcT (u_int64_t poly)
{
    int j = 0;
    int xshift = fls64 (poly) - 1;   /* degree of poly */
    shift = xshift - 8;

    u_int64_t T1 = polymod (0, INT64 (1) << xshift, poly);
    for (j = 0; j < 256; j++) {
        T[j] = polymmult (j, T1, poly) | ((u_int64_t) j << xshift);
    }
}
/* Precompute U for a fixed window of @size bytes: sizeshift is the
 * fingerprint contribution of a byte after (size-1) append steps, so
 * U[b] is the term to XOR out when byte b leaves the window. */
static void calcU(int size)
{
    int i;
    u_int64_t sizeshift = 1;

    for (i = 1; i < size; i++)
        sizeshift = append8 (sizeshift, 0);
    for (i = 0; i < 256; i++)
        U[i] = polymmult (i, sizeshift, poly);
}
/* Initialize the T and U tables for a rolling window of @len bytes.
 * Must be called (once) before rabin_checksum/rabin_rolling_checksum. */
void rabin_init(int len)
{
    calcT(poly);
    calcU(len);
}
/*
 * a simple 32 bit checksum that can be updated from end
 */
/* Fingerprint of the first @len bytes of @buf, computed by feeding each
 * byte through the rolling update with a zero byte leaving the window. */
unsigned int rabin_checksum(char *buf, int len)
{
    int i;
    unsigned int sum = 0;
    for (i = 0; i < len; ++i) {
        sum = rabin_rolling_checksum (sum, len, 0, buf[i]);
    }
    return sum;
}
/* Roll the window forward: remove oldest byte @c1 (via U) and append
 * new byte @c2 (via append8).  @len must match the rabin_init window. */
unsigned int rabin_rolling_checksum(unsigned int csum, int len,
                                    char c1, char c2)
{
    return append8(csum ^ U[(unsigned char)c1], c2);
}
================================================
FILE: common/cdc/rabin-checksum.h
================================================
#ifndef _RABIN_CHECKSUM_H
#define _RABIN_CHECKSUM_H

/* Fingerprint of the first len bytes of buf. */
unsigned int rabin_checksum(char *buf, int len);

/* Roll the window: drop oldest byte c1, append new byte c2. */
unsigned int rabin_rolling_checksum(unsigned int csum, int len, char c1, char c2);

/* Precompute tables for a rolling window of len bytes; call first. */
void rabin_init (int len);

#endif
================================================
FILE: common/commit-mgr.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#include "common.h"
#include "log.h"
#include
#include
#include "utils.h"
#include "db.h"
#include "searpc-utils.h"
#include "seafile-session.h"
#include "commit-mgr.h"
#include "seaf-utils.h"
#define MAX_TIME_SKEW 259200 /* 3 days */
struct _SeafCommitManagerPriv {
    /* No private state is currently needed; a dummy member is kept
     * because C forbids empty structs. */
    int dummy;
};
static SeafCommit *
load_commit (SeafCommitManager *mgr,
const char *repo_id, int version,
const char *commit_id);
static int
save_commit (SeafCommitManager *manager,
const char *repo_id, int version,
SeafCommit *commit);
static void
delete_commit (SeafCommitManager *mgr,
const char *repo_id, int version,
const char *id);
static json_t *
commit_to_json_object (SeafCommit *commit);
static SeafCommit *
commit_from_json_object (const char *id, json_t *object);
/* Derive commit->commit_id as the SHA-1 hex digest of the commit's
 * identifying fields: root id, creator id, creator name, description and
 * creation time. The 41-byte updates deliberately include the trailing
 * NUL of the fixed-size hex id buffers, so the exact byte layout is part
 * of the id format — do not change it. */
static void compute_commit_id (SeafCommit* commit)
{
    SHA_CTX ctx;
    uint8_t sha1[20];
    gint64 ctime_n;

    SHA1_Init (&ctx);
    SHA1_Update (&ctx, commit->root_id, 41);
    SHA1_Update (&ctx, commit->creator_id, 41);
    if (commit->creator_name)
        SHA1_Update (&ctx, commit->creator_name, strlen(commit->creator_name)+1);
    SHA1_Update (&ctx, commit->desc, strlen(commit->desc)+1);

    /* convert to network byte order so the id is platform independent */
    ctime_n = hton64 (commit->ctime);
    SHA1_Update (&ctx, &ctime_n, sizeof(ctime_n));
    SHA1_Final (sha1, &ctx);

    rawdata_to_hex (sha1, commit->commit_id, 20);
}
SeafCommit*
seaf_commit_new (const char *commit_id,
                 const char *repo_id,
                 const char *root_id,
                 const char *creator_name,
                 const char *creator_id,
                 const char *desc,
                 guint64 ctime)
{
    /* Allocate and fill a new commit object with an initial ref count of 1.
     * repo_id must be a 36-char UUID; root_id and creator_id 40-char hex ids.
     * When commit_id is NULL a new id is computed from the other fields;
     * when ctime is 0 the current time is used. */
    SeafCommit *new_commit;

    g_return_val_if_fail (repo_id != NULL, NULL);
    g_return_val_if_fail (root_id != NULL && creator_id != NULL, NULL);

    new_commit = g_new0 (SeafCommit, 1);

    memcpy (new_commit->repo_id, repo_id, 36);
    new_commit->repo_id[36] = '\0';

    memcpy (new_commit->root_id, root_id, 40);
    new_commit->root_id[40] = '\0';

    memcpy (new_commit->creator_id, creator_id, 40);
    new_commit->creator_id[40] = '\0';

    new_commit->creator_name = g_strdup (creator_name);
    new_commit->desc = g_strdup (desc);

    /* TODO: use more precise timer */
    new_commit->ctime = (ctime == 0) ? (gint64)time(NULL) : ctime;

    if (commit_id != NULL) {
        memcpy (new_commit->commit_id, commit_id, 40);
        new_commit->commit_id[40] = '\0';
    } else {
        compute_commit_id (new_commit);
    }

    new_commit->ref = 1;
    return new_commit;
}
char *
seaf_commit_to_data (SeafCommit *commit, gsize *len)
{
    /* Serialize a commit to its on-disk JSON representation.
     * On success returns a newly allocated string (caller g_free()s) and
     * stores its length in *len; returns NULL on serialization failure. */
    json_t *object;
    char *json_data;
    char *ret;

    object = commit_to_json_object (commit);
    json_data = json_dumps (object, 0);
    json_decref (object);

    /* json_dumps() can return NULL (e.g. on invalid UTF-8); the original
     * code would have crashed in strlen(). */
    if (!json_data) {
        seaf_warning ("Failed to serialize commit %s.\n", commit->commit_id);
        return NULL;
    }

    *len = strlen (json_data);
    ret = g_strdup (json_data);
    free (json_data);           /* json_dumps memory: free(), not g_free() */
    return ret;
}
SeafCommit *
seaf_commit_from_data (const char *id, char *data, gsize len)
{
    /* Parse a commit object from its JSON representation.
     * Returns a new commit (ref = 1) or NULL on parse/validation error.
     * Note: `data` may be modified in place to strip invalid UTF-8. */
    json_t *object;
    SeafCommit *commit;
    json_error_t jerror;

    /* Guard: with len == 0 the data[len-1] probe below would read out
     * of bounds. */
    if (!data || len == 0)
        return NULL;

    object = json_loadb (data, len, 0, &jerror);
    if (!object) {
        /* Perhaps the commit object contains invalid UTF-8 character. */
        if (data[len-1] == 0)
            clean_utf8_data (data, len - 1);
        else
            clean_utf8_data (data, len);

        object = json_loadb (data, len, 0, &jerror);
        if (!object) {
            if (jerror.text)
                seaf_warning ("Failed to load commit json: %s.\n", jerror.text);
            else
                seaf_warning ("Failed to load commit json.\n");
            return NULL;
        }
    }

    commit = commit_from_json_object (id, object);
    json_decref (object);

    return commit;
}
static void
seaf_commit_free (SeafCommit *commit)
{
    /* Release all heap-allocated fields, then the commit itself.
     * g_free(NULL) is a no-op, so the per-field NULL guards the original
     * used on some (but inconsistently not all) fields are unnecessary. */
    g_free (commit->desc);
    g_free (commit->creator_name);
    g_free (commit->parent_id);
    g_free (commit->second_parent_id);
    g_free (commit->repo_name);
    g_free (commit->repo_desc);
    g_free (commit->device_name);
    g_free (commit->repo_category);
    g_free (commit->salt);
    g_free (commit->client_version);
    g_free (commit->magic);
    g_free (commit->random_key);
    g_free (commit->pwd_hash);
    g_free (commit->pwd_hash_algo);
    g_free (commit->pwd_hash_params);
    g_free (commit);
}
void
seaf_commit_ref (SeafCommit *commit)
{
    /* Take an additional reference on the commit. */
    commit->ref += 1;
}
void
seaf_commit_unref (SeafCommit *commit)
{
    /* Drop one reference; the commit is freed when the count reaches zero.
     * NULL is accepted and ignored. */
    if (commit == NULL)
        return;

    commit->ref -= 1;
    if (commit->ref <= 0)
        seaf_commit_free (commit);
}
SeafCommitManager*
seaf_commit_manager_new (SeafileSession *seaf)
{
    /* Create a commit manager backed by the "commits" object store. */
    SeafCommitManager *manager;

    manager = g_new0 (SeafCommitManager, 1);
    manager->priv = g_new0 (SeafCommitManagerPriv, 1);
    manager->seaf = seaf;
    manager->obj_store = seaf_obj_store_new (seaf, "commits");

    return manager;
}
int
seaf_commit_manager_init (SeafCommitManager *mgr)
{
    /* Initialize the underlying object store.
     * Returns 0 on success, -1 on failure. */
    if (seaf_obj_store_init (mgr->obj_store) >= 0)
        return 0;

    seaf_warning ("[commit mgr] Failed to init commit object store.\n");
    return -1;
}
#if 0
/* Dead code: an in-memory commit cache, currently compiled out.
 * mgr->priv no longer has a commit_cache member (see SeafCommitManagerPriv),
 * so this cannot be re-enabled without restoring that field. */
inline static void
add_commit_to_cache (SeafCommitManager *mgr, SeafCommit *commit)
{
    g_hash_table_insert (mgr->priv->commit_cache,
                         g_strdup(commit->commit_id),
                         commit);
    seaf_commit_ref (commit);
}

inline static void
remove_commit_from_cache (SeafCommitManager *mgr, SeafCommit *commit)
{
    g_hash_table_remove (mgr->priv->commit_cache, commit->commit_id);
    seaf_commit_unref (commit);
}
#endif
int
seaf_commit_manager_add_commit (SeafCommitManager *mgr,
                                SeafCommit *commit)
{
    /* Persist the commit to the object store.
     * Returns 0 on success, -1 on failure. */
    /* add_commit_to_cache (mgr, commit); */
    if (save_commit (mgr, commit->repo_id, commit->version, commit) < 0)
        return -1;

    return 0;
}
void
seaf_commit_manager_del_commit (SeafCommitManager *mgr,
                                const char *repo_id,
                                int version,
                                const char *id)
{
    /* Permanently remove commit `id` from the object store. */
    g_return_if_fail (id != NULL);

#if 0
    /* Dead code: eviction from the disabled in-memory cache. */
    commit = g_hash_table_lookup(mgr->priv->commit_cache, id);
    if (!commit)
        goto delete;

    /*
     * Catch ref count bug here. We have bug in commit ref, the
     * following assert can't pass. TODO: fix the commit ref bug
     */
    /* g_assert (commit->ref <= 1); */
    remove_commit_from_cache (mgr, commit);

delete:
#endif

    delete_commit (mgr, repo_id, version, id);
}
SeafCommit*
seaf_commit_manager_get_commit (SeafCommitManager *mgr,
                                const char *repo_id,
                                int version,
                                const char *id)
{
    /* Load commit `id` from the object store.
     * Returns a new reference (caller must seaf_commit_unref) or NULL if
     * the commit cannot be found or parsed. */
    SeafCommit *commit;

#if 0
    /* Dead code: lookup in the disabled in-memory cache. */
    commit = g_hash_table_lookup (mgr->priv->commit_cache, id);
    if (commit != NULL) {
        seaf_commit_ref (commit);
        return commit;
    }
#endif

    commit = load_commit (mgr, repo_id, version, id);
    if (!commit)
        return NULL;

    /* add_commit_to_cache (mgr, commit); */

    return commit;
}
SeafCommit *
seaf_commit_manager_get_commit_compatible (SeafCommitManager *mgr,
                                           const char *repo_id,
                                           const char *id)
{
    /* Load a commit without knowing the repo's storage version:
     * try the version-1 layout first, then (migration/client builds only)
     * fall back to the legacy version-0 layout. */
    SeafCommit *commit = NULL;

    /* First try version 1 layout. */
    commit = seaf_commit_manager_get_commit (mgr, repo_id, 1, id);
    if (commit)
        return commit;

#if defined MIGRATION || defined SEAFILE_CLIENT
    /* For compatibility with version 0. */
    commit = seaf_commit_manager_get_commit (mgr, repo_id, 0, id);
#endif

    return commit;
}
static gint
compare_commit_by_time (gconstpointer a, gconstpointer b, gpointer unused)
{
    /* Sort comparator: the latest commit comes first in the list.
     * Compare explicitly instead of returning (b->ctime - a->ctime):
     * ctime is a guint64, so the subtraction truncated to gint can wrap
     * and yield the wrong sign for timestamps more than ~2^31 apart. */
    const SeafCommit *commit_a = a;
    const SeafCommit *commit_b = b;

    if (commit_b->ctime > commit_a->ctime)
        return 1;
    else if (commit_b->ctime < commit_a->ctime)
        return -1;
    return 0;
}
/* Queue parent_id for traversal if it has not been visited yet.
 * Returns 0 on success, if already visited, or if the parent is missing
 * and allow_truncate is set; -1 if the parent commit cannot be loaded.
 * The list is kept sorted newest-first; `hash` records visited ids. */
inline static int
insert_parent_commit (GList **list, GHashTable *hash,
                      const char *repo_id, int version,
                      const char *parent_id, gboolean allow_truncate)
{
    SeafCommit *p;
    char *key;

    if (g_hash_table_lookup (hash, parent_id) != NULL)
        return 0;

    p = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                        repo_id, version,
                                        parent_id);
    if (!p) {
        if (allow_truncate)
            return 0;
        seaf_warning ("Failed to find commit %s\n", parent_id);
        return -1;
    }

    *list = g_list_insert_sorted_with_data (*list, p,
                                            compare_commit_by_time,
                                            NULL);
    /* key doubles as key and value; ownership passes to the hash table. */
    key = g_strdup (parent_id);
    g_hash_table_replace (hash, key, key);

    return 0;
}
/* Traverse the commit DAG from `head`, newest-first (ordered by ctime),
 * calling `func` on each commit. Stops after `limit` commits when
 * limit > 0 and the pending list has collapsed to at most one branch;
 * the commit to resume from is then returned via *next_start_commit.
 * Returns FALSE on a callback/load error unless skip_errors is set. */
gboolean
seaf_commit_manager_traverse_commit_tree_with_limit (SeafCommitManager *mgr,
                                                     const char *repo_id,
                                                     int version,
                                                     const char *head,
                                                     CommitTraverseFunc func,
                                                     int limit,
                                                     void *data,
                                                     char **next_start_commit,
                                                     gboolean skip_errors)
{
    SeafCommit *commit;
    GList *list = NULL;
    GHashTable *commit_hash;
    gboolean ret = TRUE;

    /* A hash table for recording id of traversed commits. */
    commit_hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);

    commit = seaf_commit_manager_get_commit (mgr, repo_id, version, head);
    if (!commit) {
        seaf_warning ("Failed to find commit %s.\n", head);
        g_hash_table_destroy (commit_hash);
        return FALSE;
    }

    /* Pending list is kept sorted newest-first by compare_commit_by_time. */
    list = g_list_insert_sorted_with_data (list, commit,
                                           compare_commit_by_time,
                                           NULL);
    /* key serves as both key and value; freed by the hash table. */
    char *key = g_strdup (commit->commit_id);
    g_hash_table_replace (commit_hash, key, key);

    int count = 0;
    while (list) {
        gboolean stop = FALSE;
        commit = list->data;
        list = g_list_delete_link (list, list);

        if (!func (commit, data, &stop)) {
            if (!skip_errors) {
                seaf_commit_unref (commit);
                ret = FALSE;
                goto out;
            }
        }

        if (stop) {
            seaf_commit_unref (commit);
            /* stop traverse down from this commit,
             * but not stop traversing the tree
             */
            continue;
        }

        if (commit->parent_id) {
            if (insert_parent_commit (&list, commit_hash, repo_id, version,
                                      commit->parent_id, FALSE) < 0) {
                if (!skip_errors) {
                    seaf_commit_unref (commit);
                    ret = FALSE;
                    goto out;
                }
            }
        }
        if (commit->second_parent_id) {
            if (insert_parent_commit (&list, commit_hash, repo_id, version,
                                      commit->second_parent_id, FALSE) < 0) {
                if (!skip_errors) {
                    seaf_commit_unref (commit);
                    ret = FALSE;
                    goto out;
                }
            }
        }
        seaf_commit_unref (commit);

        /* Stop when limit is reached and don't stop at unmerged branch.
         * If limit < 0, there is no limit;
         */
        if (limit > 0 && ++count >= limit && (!list || !list->next)) {
            break;
        }
    }

    /*
     * two scenarios:
     * 1. list is empty, indicate scan end
     * 2. list only have one commit, as start for next scan
     */
    if (list) {
        commit = list->data;
        if (next_start_commit) {
            *next_start_commit = g_strdup (commit->commit_id);
        }
        seaf_commit_unref (commit);
        list = g_list_delete_link (list, list);
    }

out:
    g_hash_table_destroy (commit_hash);
    /* Drop references on anything still queued (error path). */
    while (list) {
        commit = list->data;
        seaf_commit_unref (commit);
        list = g_list_delete_link (list, list);
    }

    return ret;
}
/* Shared implementation of the two public traverse functions.
 * Walks the DAG from `head`, newest-first, invoking `func` on each commit.
 * With allow_truncate, a missing parent silently ends that branch;
 * otherwise it is an error. skip_errors makes callback/load errors
 * non-fatal. Returns FALSE if the head is missing or a fatal error occurs. */
static gboolean
traverse_commit_tree_common (SeafCommitManager *mgr,
                             const char *repo_id,
                             int version,
                             const char *head,
                             CommitTraverseFunc func,
                             void *data,
                             gboolean skip_errors,
                             gboolean allow_truncate)
{
    SeafCommit *commit;
    GList *list = NULL;
    GHashTable *commit_hash;
    gboolean ret = TRUE;

    commit = seaf_commit_manager_get_commit (mgr, repo_id, version, head);
    if (!commit) {
        seaf_warning ("Failed to find commit %s.\n", head);
        // For head commit damaged, directly return FALSE
        // user can repair head by fsck then retraverse the tree
        return FALSE;
    }

    /* A hash table for recording id of traversed commits. */
    commit_hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);

    /* Pending list is kept sorted newest-first. */
    list = g_list_insert_sorted_with_data (list, commit,
                                           compare_commit_by_time,
                                           NULL);
    /* key serves as both key and value; freed by the hash table. */
    char *key = g_strdup (commit->commit_id);
    g_hash_table_replace (commit_hash, key, key);

    while (list) {
        gboolean stop = FALSE;
        commit = list->data;
        list = g_list_delete_link (list, list);

        if (!func (commit, data, &stop)) {
            seaf_warning("[comit-mgr] CommitTraverseFunc failed\n");
            /* If skip errors, continue to traverse parents. */
            if (!skip_errors) {
                seaf_commit_unref (commit);
                ret = FALSE;
                goto out;
            }
        }
        if (stop) {
            seaf_commit_unref (commit);
            /* stop traverse down from this commit,
             * but not stop traversing the tree
             */
            continue;
        }

        if (commit->parent_id) {
            if (insert_parent_commit (&list, commit_hash, repo_id, version,
                                      commit->parent_id, allow_truncate) < 0) {
                seaf_warning("[comit-mgr] insert parent commit failed\n");
                /* If skip errors, try insert second parent. */
                if (!skip_errors) {
                    seaf_commit_unref (commit);
                    ret = FALSE;
                    goto out;
                }
            }
        }
        if (commit->second_parent_id) {
            if (insert_parent_commit (&list, commit_hash, repo_id, version,
                                      commit->second_parent_id, allow_truncate) < 0) {
                seaf_warning("[comit-mgr]insert second parent commit failed\n");
                if (!skip_errors) {
                    seaf_commit_unref (commit);
                    ret = FALSE;
                    goto out;
                }
            }
        }
        seaf_commit_unref (commit);
    }

out:
    g_hash_table_destroy (commit_hash);
    /* Drop references on anything still queued (error path). */
    while (list) {
        commit = list->data;
        seaf_commit_unref (commit);
        list = g_list_delete_link (list, list);
    }

    return ret;
}
gboolean
seaf_commit_manager_traverse_commit_tree (SeafCommitManager *mgr,
                                          const char *repo_id,
                                          int version,
                                          const char *head,
                                          CommitTraverseFunc func,
                                          void *data,
                                          gboolean skip_errors)
{
    /* Strict traversal: a missing parent commit is reported as an error
     * (allow_truncate = FALSE). */
    return traverse_commit_tree_common (mgr, repo_id, version, head, func,
                                        data, skip_errors, FALSE);
}
gboolean
seaf_commit_manager_traverse_commit_tree_truncated (SeafCommitManager *mgr,
                                                    const char *repo_id,
                                                    int version,
                                                    const char *head,
                                                    CommitTraverseFunc func,
                                                    void *data,
                                                    gboolean skip_errors)
{
    /* Lenient traversal: a missing parent commit simply ends that branch
     * (allow_truncate = TRUE). */
    return traverse_commit_tree_common (mgr, repo_id, version, head, func,
                                        data, skip_errors, TRUE);
}
gboolean
seaf_commit_manager_commit_exists (SeafCommitManager *mgr,
                                   const char *repo_id,
                                   int version,
                                   const char *id)
{
    /* Check object-store existence only; the content is not validated. */
#if 0
    /* Dead code: lookup in the disabled in-memory cache. */
    commit = g_hash_table_lookup (mgr->priv->commit_cache, id);
    if (commit != NULL)
        return TRUE;
#endif

    return seaf_obj_store_obj_exists (mgr->obj_store, repo_id, version, id);
}
/* Serialize a commit into a JSON object (the on-disk commit format).
 * Optional fields are emitted only when set so older readers still work.
 * Caller owns the returned json_t reference (json_decref when done). */
static json_t *
commit_to_json_object (SeafCommit *commit)
{
    json_t *object;

    object = json_object ();

    json_object_set_string_member (object, "commit_id", commit->commit_id);
    json_object_set_string_member (object, "root_id", commit->root_id);
    json_object_set_string_member (object, "repo_id", commit->repo_id);
    if (commit->creator_name)
        json_object_set_string_member (object, "creator_name", commit->creator_name);
    json_object_set_string_member (object, "creator", commit->creator_id);
    json_object_set_string_member (object, "description", commit->desc);
    json_object_set_int_member (object, "ctime", (gint64)commit->ctime);
    json_object_set_string_or_null_member (object, "parent_id", commit->parent_id);
    json_object_set_string_or_null_member (object, "second_parent_id",
                                           commit->second_parent_id);
    /*
     * also save repo's properties to commit file, for easy sharing of
     * repo info
     */
    json_object_set_string_member (object, "repo_name", commit->repo_name);
    json_object_set_string_member (object, "repo_desc",
                                   commit->repo_desc);
    json_object_set_string_or_null_member (object, "repo_category",
                                           commit->repo_category);
    if (commit->device_name)
        json_object_set_string_member (object, "device_name", commit->device_name);
    if (commit->client_version)
        json_object_set_string_member (object, "client_version", commit->client_version);

    /* Encryption metadata; which fields exist depends on enc_version. */
    if (commit->encrypted)
        json_object_set_string_member (object, "encrypted", "true");

    if (commit->encrypted) {
        json_object_set_int_member (object, "enc_version", commit->enc_version);
        // If pwd_hash is set, the magic field is no longer included in the commit of the newly created repo.
        if (commit->enc_version >= 1 && !commit->pwd_hash)
            json_object_set_string_member (object, "magic", commit->magic);
        if (commit->enc_version >= 2)
            json_object_set_string_member (object, "key", commit->random_key);
        if (commit->enc_version >= 3)
            json_object_set_string_member (object, "salt", commit->salt);
        if (commit->pwd_hash) {
            json_object_set_string_member (object, "pwd_hash", commit->pwd_hash);
            json_object_set_string_member (object, "pwd_hash_algo", commit->pwd_hash_algo);
            json_object_set_string_member (object, "pwd_hash_params", commit->pwd_hash_params);
        }
    }
    if (commit->no_local_history)
        json_object_set_int_member (object, "no_local_history", 1);
    if (commit->version != 0)
        json_object_set_int_member (object, "version", commit->version);
    if (commit->conflict)
        json_object_set_int_member (object, "conflict", 1);
    if (commit->new_merge)
        json_object_set_int_member (object, "new_merge", 1);
    if (commit->repaired)
        json_object_set_int_member (object, "repaired", 1);

    return object;
}
/* Deserialize and validate a commit from its JSON object.
 * Returns a new SeafCommit (ref = 1) or NULL if any required field is
 * missing or malformed. Missing description/repo_name/repo_desc are
 * tolerated and replaced with empty strings. */
static SeafCommit *
commit_from_json_object (const char *commit_id, json_t *object)
{
    SeafCommit *commit = NULL;
    const char *root_id;
    const char *repo_id;
    const char *creator_name = NULL;
    const char *creator;
    const char *desc;
    gint64 ctime;
    const char *parent_id, *second_parent_id;
    const char *repo_name;
    const char *repo_desc;
    const char *repo_category;
    const char *device_name;
    const char *client_version;
    const char *encrypted = NULL;
    int enc_version = 0;
    const char *magic = NULL;
    const char *random_key = NULL;
    const char *salt = NULL;
    const char *pwd_hash = NULL;
    const char *pwd_hash_algo = NULL;
    const char *pwd_hash_params = NULL;
    int no_local_history = 0;
    int version = 0;
    int conflict = 0, new_merge = 0;
    int repaired = 0;

    root_id = json_object_get_string_member (object, "root_id");
    repo_id = json_object_get_string_member (object, "repo_id");
    if (json_object_has_member (object, "creator_name"))
        creator_name = json_object_get_string_or_null_member (object, "creator_name");
    creator = json_object_get_string_member (object, "creator");
    desc = json_object_get_string_member (object, "description");
    if (!desc)
        desc = "";
    ctime = (guint64) json_object_get_int_member (object, "ctime");
    parent_id = json_object_get_string_or_null_member (object, "parent_id");
    second_parent_id = json_object_get_string_or_null_member (object, "second_parent_id");
    repo_name = json_object_get_string_member (object, "repo_name");
    if (!repo_name)
        repo_name = "";
    repo_desc = json_object_get_string_member (object, "repo_desc");
    if (!repo_desc)
        repo_desc = "";
    repo_category = json_object_get_string_or_null_member (object, "repo_category");
    device_name = json_object_get_string_or_null_member (object, "device_name");
    client_version = json_object_get_string_or_null_member (object, "client_version");

    /* "encrypted" is stored as the string "true", not a JSON boolean. */
    if (json_object_has_member (object, "encrypted"))
        encrypted = json_object_get_string_or_null_member (object, "encrypted");

    if (encrypted && strcmp(encrypted, "true") == 0
        && json_object_has_member (object, "enc_version")) {
        enc_version = json_object_get_int_member (object, "enc_version");
        magic = json_object_get_string_member (object, "magic");
        pwd_hash = json_object_get_string_member (object, "pwd_hash");
        pwd_hash_algo = json_object_get_string_member (object, "pwd_hash_algo");
        pwd_hash_params = json_object_get_string_member (object, "pwd_hash_params");
    }

    if (enc_version >= 2)
        random_key = json_object_get_string_member (object, "key");
    if (enc_version >= 3)
        salt = json_object_get_string_member (object, "salt");

    if (json_object_has_member (object, "no_local_history"))
        no_local_history = json_object_get_int_member (object, "no_local_history");

    if (json_object_has_member (object, "version"))
        version = json_object_get_int_member (object, "version");
    if (json_object_has_member (object, "new_merge"))
        new_merge = json_object_get_int_member (object, "new_merge");
    if (json_object_has_member (object, "conflict"))
        conflict = json_object_get_int_member (object, "conflict");
    if (json_object_has_member (object, "repaired"))
        repaired = json_object_get_int_member (object, "repaired");

    /* sanity check for incoming values. */
    if (!repo_id || !is_uuid_valid(repo_id)  ||
        !root_id || !is_object_id_valid(root_id) ||
        !creator || strlen(creator) != 40 ||
        (parent_id && !is_object_id_valid(parent_id)) ||
        (second_parent_id && !is_object_id_valid(second_parent_id)))
        return commit;

    // If pwd_hash is set, the magic field is no longer included in the commit of the newly created repo.
    if (!magic)
        magic = pwd_hash;

    /* Per-encryption-version length checks of the key material. */
    switch (enc_version) {
    case 0:
        break;
    case 1:
        if (!magic || strlen(magic) != 32)
            return NULL;
        break;
    case 2:
        if (!magic || strlen(magic) != 64)
            return NULL;
        if (!random_key || strlen(random_key) != 96)
            return NULL;
        break;
    case 3:
        if (!magic || strlen(magic) != 64)
            return NULL;
        if (!random_key || strlen(random_key) != 96)
            return NULL;
        if (!salt || strlen(salt) != 64)
            return NULL;
        break;
    case 4:
        if (!magic || strlen(magic) != 64)
            return NULL;
        if (!random_key || strlen(random_key) != 96)
            return NULL;
        if (!salt || strlen(salt) != 64)
            return NULL;
        break;
    default:
        seaf_warning ("Unknown encryption version %d.\n", enc_version);
        return NULL;
    }

    /* Creator name is normalized to lower case. */
    char *creator_name_l = creator_name ? g_ascii_strdown (creator_name, -1) : NULL;
    commit = seaf_commit_new (commit_id, repo_id, root_id,
                              creator_name_l, creator, desc, ctime);
    g_free (creator_name_l);

    commit->parent_id = parent_id ? g_strdup(parent_id) : NULL;
    commit->second_parent_id = second_parent_id ? g_strdup(second_parent_id) : NULL;
    commit->repo_name = g_strdup(repo_name);
    commit->repo_desc = g_strdup(repo_desc);
    if (encrypted && strcmp(encrypted, "true") == 0)
        commit->encrypted = TRUE;
    else
        commit->encrypted = FALSE;
    if (repo_category)
        commit->repo_category = g_strdup(repo_category);
    commit->device_name = g_strdup(device_name);
    commit->client_version = g_strdup(client_version);

    if (commit->encrypted) {
        commit->enc_version = enc_version;
        if (enc_version >= 1 && !pwd_hash)
            commit->magic = g_strdup(magic);
        if (enc_version >= 2)
            commit->random_key = g_strdup (random_key);
        if (enc_version >= 3)
            commit->salt = g_strdup(salt);
        if (pwd_hash) {
            commit->pwd_hash = g_strdup (pwd_hash);
            commit->pwd_hash_algo = g_strdup (pwd_hash_algo);
            commit->pwd_hash_params = g_strdup (pwd_hash_params);
        }
    }
    if (no_local_history)
        commit->no_local_history = TRUE;
    commit->version = version;
    if (new_merge)
        commit->new_merge = TRUE;
    if (conflict)
        commit->conflict = TRUE;
    if (repaired)
        commit->repaired = TRUE;

    return commit;
}
static SeafCommit *
load_commit (SeafCommitManager *mgr,
             const char *repo_id,
             int version,
             const char *commit_id)
{
    /* Read commit_id from the object store and parse it.
     * Returns a new commit with its manager back-pointer set, or NULL on
     * any error. */
    char *data = NULL;
    int len;
    SeafCommit *commit = NULL;
    json_t *object = NULL;
    json_error_t jerror;

    if (!commit_id || strlen(commit_id) != 40)
        return NULL;

    if (seaf_obj_store_read_obj (mgr->obj_store, repo_id, version,
                                 commit_id, (void **)&data, &len) < 0)
        return NULL;

    /* Guard against empty/corrupt objects: the data[len-1] probe below
     * would read out of bounds for len == 0. */
    if (!data || len <= 0)
        goto out;

    object = json_loadb (data, len, 0, &jerror);
    if (!object) {
        /* Perhaps the commit object contains invalid UTF-8 character. */
        if (data[len-1] == 0)
            clean_utf8_data (data, len - 1);
        else
            clean_utf8_data (data, len);

        object = json_loadb (data, len, 0, &jerror);
        if (!object) {
            if (jerror.text)
                seaf_warning ("Failed to load commit json object: %s.\n", jerror.text);
            else
                seaf_warning ("Failed to load commit json object.\n");
            goto out;
        }
    }

    commit = commit_from_json_object (commit_id, object);
    if (commit)
        commit->manager = mgr;

out:
    if (object) json_decref (object);
    g_free (data);

    return commit;
}
static int
save_commit (SeafCommitManager *manager,
             const char *repo_id,
             int version,
             SeafCommit *commit)
{
    /* Serialize the commit and write it to the object store.
     * Returns 0 on success (or if the object already exists), -1 on error.
     * Commits are content-addressed, so an existing object needs no rewrite. */
    json_t *object = NULL;
    char *data;
    gsize len;
    int ret = 0;

    if (seaf_obj_store_obj_exists (manager->obj_store,
                                   repo_id, version,
                                   commit->commit_id))
        return 0;

    object = commit_to_json_object (commit);
    data = json_dumps (object, 0);
    json_decref (object);

    /* json_dumps() can fail (e.g. on invalid UTF-8). */
    if (!data) {
        seaf_warning ("Failed to serialize commit %s.\n", commit->commit_id);
        return -1;
    }
    len = strlen (data);

    /* The server syncs the commit object to disk; clients do not. */
#ifdef SEAFILE_SERVER
    const gboolean sync_to_disk = TRUE;
#else
    const gboolean sync_to_disk = FALSE;
#endif
    if (seaf_obj_store_write_obj (manager->obj_store,
                                  repo_id, version,
                                  commit->commit_id,
                                  data, (int)len, sync_to_disk) < 0)
        ret = -1;

    /* json_dumps memory must be released with free(), not g_free()
     * (the original error paths incorrectly used g_free). */
    free (data);
    return ret;
}
static void
delete_commit (SeafCommitManager *mgr,
               const char *repo_id,
               int version,
               const char *id)
{
    /* Remove the commit object from the backing store; the store's
     * return value is deliberately ignored. */
    seaf_obj_store_delete_obj (mgr->obj_store, repo_id, version, id);
}
int
seaf_commit_manager_remove_store (SeafCommitManager *mgr,
                                  const char *store_id)
{
    /* Delete the entire commit object store identified by store_id. */
    return seaf_obj_store_remove_store (mgr->obj_store, store_id);
}
================================================
FILE: common/commit-mgr.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef SEAF_COMMIT_MGR_H
#define SEAF_COMMIT_MGR_H
struct _SeafCommitManager;
typedef struct _SeafCommit SeafCommit;
#include
#include "db.h"
#include "obj-store.h"
struct _SeafCommit {
    struct _SeafCommitManager *manager;  /* back pointer, set by load_commit */

    int         ref;                /* reference count (seaf_commit_ref/unref) */

    char        commit_id[41];      /* 40-char SHA-1 hex id + NUL */
    char        repo_id[37];        /* 36-char repo UUID + NUL */
    char        root_id[41];        /* the fs root */
    char       *desc;
    char       *creator_name;
    char        creator_id[41];     /* 40-char hex id + NUL */
    guint64     ctime;              /* creation time */
    char       *parent_id;          /* may be NULL */
    char       *second_parent_id;   /* may be NULL; set for merge commits */
    char       *repo_name;
    char       *repo_desc;
    char       *repo_category;
    char       *device_name;
    char       *client_version;

    gboolean    encrypted;
    int         enc_version;
    char       *magic;              /* set when enc_version >= 1 and no pwd_hash */
    char       *random_key;         /* set when enc_version >= 2 */
    char       *salt;               /* set when enc_version >= 3 */
    char       *pwd_hash;           /* newer password hash; replaces magic when set */
    char       *pwd_hash_algo;
    char       *pwd_hash_params;

    gboolean    no_local_history;

    int         version;

    gboolean    new_merge;
    gboolean    conflict;
    gboolean    repaired;
};
/**
* @commit_id: if this is NULL, will create a new id.
* @ctime: if this is 0, will use current time.
*
* Any new commit should be added to commit manager before used.
*/
SeafCommit *
seaf_commit_new (const char *commit_id,
const char *repo_id,
const char *root_id,
const char *author_name,
const char *creator_id,
const char *desc,
guint64 ctime);
char *
seaf_commit_to_data (SeafCommit *commit, gsize *len);
SeafCommit *
seaf_commit_from_data (const char *id, char *data, gsize len);
void
seaf_commit_ref (SeafCommit *commit);
void
seaf_commit_unref (SeafCommit *commit);
/* Set *stop to TRUE to stop traversing down a branch of the history graph.
   Note that when there are multiple branches, the function will still be
   called for the other branches, so setting *stop to TRUE does not always
   stop traversal of the whole graph.
*/
typedef gboolean (*CommitTraverseFunc) (SeafCommit *commit, void *data, gboolean *stop);
struct _SeafileSession;
typedef struct _SeafCommitManager SeafCommitManager;
typedef struct _SeafCommitManagerPriv SeafCommitManagerPriv;
struct _SeafCommitManager {
    struct _SeafileSession *seaf;   /* owning session */

    sqlite3    *db;                 /* NOTE(review): never assigned in commit-mgr.c —
                                       looks unused; confirm before removing */
    struct SeafObjStore *obj_store; /* backing store for commit objects */

    SeafCommitManagerPriv *priv;
};
SeafCommitManager *
seaf_commit_manager_new (struct _SeafileSession *seaf);
int
seaf_commit_manager_init (SeafCommitManager *mgr);
/**
* Add a commit to commit manager and persist it to disk.
* Any new commit should be added to commit manager before used.
* This function increments ref count of the commit object.
* Not MT safe.
*/
int
seaf_commit_manager_add_commit (SeafCommitManager *mgr, SeafCommit *commit);
/**
* Delete a commit from commit manager and permanently remove it from disk.
* A commit object to be deleted should have ref count <= 1.
* Not MT safe.
*/
void
seaf_commit_manager_del_commit (SeafCommitManager *mgr,
const char *repo_id,
int version,
const char *id);
/**
* Find a commit object.
* This function increments ref count of returned object.
* Not MT safe.
*/
SeafCommit*
seaf_commit_manager_get_commit (SeafCommitManager *mgr,
const char *repo_id,
int version,
const char *id);
/**
* Get a commit object, with compatibility between version 0 and version 1.
* It will first try to get commit with version 1 layout; if fails, will
* try version 0 layout for compatibility.
* This is useful for loading a repo. In that case, we don't know the version
* of the repo before loading its head commit.
*/
SeafCommit *
seaf_commit_manager_get_commit_compatible (SeafCommitManager *mgr,
const char *repo_id,
const char *id);
/**
 * Traverse the commit DAG starting from head in topological order.
 * The ordering is based on commit time.
 * Returns FALSE if some commits are missing, TRUE otherwise.
 */
gboolean
seaf_commit_manager_traverse_commit_tree (SeafCommitManager *mgr,
const char *repo_id,
int version,
const char *head,
CommitTraverseFunc func,
void *data,
gboolean skip_errors);
/*
* The same as the above function, but stops traverse down if parent commit
* doesn't exists, instead of returning error.
*/
gboolean
seaf_commit_manager_traverse_commit_tree_truncated (SeafCommitManager *mgr,
const char *repo_id,
int version,
const char *head,
CommitTraverseFunc func,
void *data,
gboolean skip_errors);
/**
* Works the same as seaf_commit_manager_traverse_commit_tree, but stops
* traversing when a total number of _limit_ commits is reached. If
* limit <= 0, there is no limit
*/
gboolean
seaf_commit_manager_traverse_commit_tree_with_limit (SeafCommitManager *mgr,
const char *repo_id,
int version,
const char *head,
CommitTraverseFunc func,
int limit,
void *data,
char **next_start_commit,
gboolean skip_errors);
gboolean
seaf_commit_manager_commit_exists (SeafCommitManager *mgr,
const char *repo_id,
int version,
const char *id);
int
seaf_commit_manager_remove_store (SeafCommitManager *mgr,
const char *store_id);
#endif
================================================
FILE: common/common.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef COMMON_H
#define COMMON_H
#ifdef HAVE_CONFIG_H
#include
#endif
#include
#include
#include /* uint32_t */
#include /* size_t */
#include
#include
#include
#include
#include
#include
#include
#define EMPTY_SHA1 "0000000000000000000000000000000000000000"
#define CURRENT_ENC_VERSION 3
#define DEFAULT_PROTO_VERSION 1
#define CURRENT_PROTO_VERSION 7
#define CURRENT_REPO_VERSION 1
/* For compatibility with the old protocol, use an UUID for signature.
* Listen manager on the server will use the new block tx protocol if it
* receives this signature as "token".
*/
#define BLOCK_PROTOCOL_SIGNATURE "529319a0-577f-4d6b-a6c3-3c20f56f290c"
#define SEAF_PATH_MAX 4096
#ifndef ccnet_warning
#define ccnet_warning(fmt, ...) g_warning("%s(%d): " fmt, __FILE__, __LINE__, ##__VA_ARGS__)
#endif
#ifndef ccnet_error
#define ccnet_error(fmt, ...) g_error("%s(%d): " fmt, __FILE__, __LINE__, ##__VA_ARGS__)
#endif
#ifndef ccnet_message
#define ccnet_message(fmt, ...) g_message("%s(%d): " fmt, __FILE__, __LINE__, ##__VA_ARGS__)
#endif
#ifndef ccnet_debug
#define ccnet_debug(fmt, ...) g_debug(fmt, ##__VA_ARGS__)
#endif
#define DEFAULT_CONFIG_DIR "~/.ccnet"
#endif
================================================
FILE: common/config-mgr.c
================================================
#include "common.h"
#include "config-mgr.h"
#include "seaf-db.h"
#include "log.h"
int
seaf_cfg_manager_init (SeafCfgManager *mgr)
{
    /* Create the SeafileConf table if it does not exist yet.
     * MySQL needs an explicit auto-increment primary key and storage engine;
     * other backends use the plain schema. Returns 0 on success, -1 on error. */
    const int db_type = seaf_db_type (mgr->db);
    char *sql;

    if (db_type == SEAF_DB_TYPE_MYSQL)
        sql = "CREATE TABLE IF NOT EXISTS SeafileConf ("
              "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, cfg_group VARCHAR(255) NOT NULL,"
              "cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER) ENGINE=INNODB";
    else
        sql = "CREATE TABLE IF NOT EXISTS SeafileConf (cfg_group VARCHAR(255) NOT NULL,"
              "cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER)";

    return (seaf_db_query (mgr->db, sql) < 0) ? -1 : 0;
}
SeafCfgManager *
seaf_cfg_manager_new (SeafileSession *session)
{
    /* Create a config manager bound to the session's key file and database.
     * g_new0() aborts on OOM and never returns NULL, so the original
     * post-allocation NULL check was dead code and has been removed. */
    SeafCfgManager *mgr = g_new0 (SeafCfgManager, 1);

    mgr->config = session->config;
    mgr->db = session->db;

    return mgr;
}
int
seaf_cfg_manager_set_config_int (SeafCfgManager *mgr,
                                 const char *group,
                                 const char *key,
                                 int value)
{
    /* Store an int option as its decimal string representation. */
    char buf[256];

    snprintf (buf, sizeof(buf), "%d", value);
    return seaf_cfg_manager_set_config (mgr, group, key, buf);
}
int
seaf_cfg_manager_set_config_int64 (SeafCfgManager *mgr,
                                   const char *group,
                                   const char *key,
                                   gint64 value)
{
    /* Store a 64-bit int option as its decimal string representation. */
    char buf[256];

    snprintf (buf, sizeof(buf), "%"G_GINT64_FORMAT"", value);
    return seaf_cfg_manager_set_config (mgr, group, key, buf);
}
int
seaf_cfg_manager_set_config_string (SeafCfgManager *mgr,
                                    const char *group,
                                    const char *key,
                                    const char *value)
{
    /* Store a string option; values longer than 255 bytes are truncated
     * (matching the VARCHAR(255) column).
     * Fix: passing NULL to snprintf "%s" is undefined behavior — reject
     * NULL explicitly instead. */
    char value_str[256];

    if (!value)
        return -1;

    snprintf (value_str, sizeof(value_str), "%s", value);
    return seaf_cfg_manager_set_config (mgr, group, key, value_str);
}
int
seaf_cfg_manager_set_config_boolean (SeafCfgManager *mgr,
                                     const char *group,
                                     const char *key,
                                     gboolean value)
{
    /* Booleans are stored as the literal strings "true" / "false". */
    return seaf_cfg_manager_set_config (mgr, group, key,
                                        value ? "true" : "false");
}
int
seaf_cfg_manager_set_config (SeafCfgManager *mgr, const char *group, const char *key, const char *value)
{
    /* Insert or update option [group:key] in the SeafileConf table.
     * Returns 0 on success, -1 on any database error.
     * NOTE(review): the SELECT-then-write pair is not atomic; concurrent
     * writers could race and produce duplicate rows. */
    gboolean exists, err = FALSE;

    char *sql = "SELECT 1 FROM SeafileConf WHERE cfg_group=? AND cfg_key=?";
    exists = seaf_db_statement_exists(mgr->db, sql, &err,
                                      2, "string", group,
                                      "string", key);
    if (err) {
        seaf_warning ("[db error]Failed to set config [%s:%s] to db.\n", group, key);
        return -1;
    }

    /* Both statements bind (value, group, key) in the same order. */
    if (exists)
        sql = "UPDATE SeafileConf SET value=? WHERE cfg_group=? AND cfg_key=?";
    else
        sql = "INSERT INTO SeafileConf (value, cfg_group, cfg_key, property) VALUES "
              "(?,?,?,0)";
    if (seaf_db_statement_query (mgr->db, sql, 3,
                                 "string", value, "string",
                                 group, "string", key) < 0) {
        seaf_warning ("Failed to set config [%s:%s] to db.\n", group, key);
        return -1;
    }

    return 0;
}
int
seaf_cfg_manager_get_config_int (SeafCfgManager *mgr, const char *group, const char *key)
{
    /* Fetch an int option; the DB value takes precedence over the config
     * file. Returns -1 if the option is missing or not a valid integer
     * (note: this makes -1 ambiguous with a genuinely stored -1). */
    int ret;
    char *invalid = NULL;

    char *value = seaf_cfg_manager_get_config (mgr, group, key);
    if (!value) {
        /* Fall back to the key file. */
        GError *err = NULL;
        ret = g_key_file_get_integer (mgr->config, group, key, &err);
        if (err) {
            ret = -1;
            g_clear_error(&err);
        }
    } else {
        ret = strtol (value, &invalid, 10);
        if (*invalid != '\0') {
            ret = -1;
            seaf_warning ("Value of config [%s:%s] is invalid: [%s]\n", group, key, value);
        }
        g_free (value);
    }

    return ret;
}
gint64
seaf_cfg_manager_get_config_int64 (SeafCfgManager *mgr, const char *group, const char *key)
{
    /* Read a 64-bit integer option: database first, key-file fallback.
     * Returns -1 when the option is missing or not a valid integer. */
    gint64 ret;
    char *endptr = NULL;
    char *value = seaf_cfg_manager_get_config (mgr, group, key);

    if (!value) {
        /* Not in the database; fall back to the config file. */
        GError *err = NULL;
        ret = g_key_file_get_int64(mgr->config, group, key, &err);
        if (err) {
            ret = -1;
            g_clear_error(&err);
        }
    } else {
        ret = strtoll (value, &endptr, 10);
        /* Reject an empty value as well as trailing garbage; the old
         * code only checked *endptr and silently treated "" as 0. */
        if (endptr == value || *endptr != '\0') {
            seaf_warning ("Value of config [%s:%s] is invalid: [%s]\n", group, key, value);
            ret = -1;
        }
        g_free (value);
    }

    return ret;
}
gboolean
seaf_cfg_manager_get_config_boolean (SeafCfgManager *mgr, const char *group, const char *key)
{
    /* Read a boolean option: database first, key-file fallback.
     * Only the exact database string "true" enables the option. */
    gboolean enabled;
    char *value = seaf_cfg_manager_get_config (mgr, group, key);

    if (value) {
        enabled = (strcmp ("true", value) == 0);
        g_free (value);
        return enabled;
    }

    GError *err = NULL;
    enabled = g_key_file_get_boolean(mgr->config, group, key, &err);
    if (err) {
        seaf_warning ("Config [%s:%s] not set, default is false.\n", group, key);
        enabled = FALSE;
        g_clear_error(&err);
    }

    return enabled;
}
char *
seaf_cfg_manager_get_config_string (SeafCfgManager *mgr, const char *group, const char *key)
{
    /* Read a string option.  The database value wins; otherwise fall
     * back to the key file.  Returns a newly allocated string (caller
     * frees) or NULL when the option is absent in both sources. */
    char *db_value = seaf_cfg_manager_get_config (mgr, group, key);
    if (db_value)
        return db_value;

    char *file_value = g_key_file_get_string (mgr->config, group, key, NULL);
    if (file_value != NULL)
        file_value = g_strstrip(file_value);
    return file_value;
}
char *
seaf_cfg_manager_get_config (SeafCfgManager *mgr, const char *group, const char *key)
{
    /* Database-only lookup; no key-file fallback.  Returns a newly
     * allocated, whitespace-stripped value or NULL when not present. */
    const char *sql =
        "SELECT value FROM SeafileConf WHERE cfg_group=? AND cfg_key=?";
    char *value;

    value = seaf_db_statement_get_string (mgr->db, sql, 2,
                                          "string", group, "string", key);
    /* g_strstrip() strips in place and returns its argument. */
    return value ? g_strstrip(value) : NULL;
}
================================================
FILE: common/config-mgr.h
================================================
#ifndef SEAF_CONFIG_MGR_H
#define SEAF_CONFIG_MGR_H

typedef struct _SeafCfgManager SeafCfgManager;

#include "seafile-session.h"

/* Configuration manager.  Options are looked up in the database table
 * SeafileConf first; the getters fall back to the parsed seafile.conf
 * key file when the database has no entry. */
struct _SeafCfgManager {
    GKeyFile *config;   /* parsed seafile.conf (fallback source) */
    SeafDB *db;         /* database holding the SeafileConf table */
};

typedef struct _SeafileSession SeafileSession;

SeafCfgManager *
seaf_cfg_manager_new (SeafileSession *seaf);

/* Insert or update one option in the database.
 * Returns 0 on success, -1 on a database error. */
int
seaf_cfg_manager_set_config (SeafCfgManager *mgr, const char *group, const char *key, const char *value);

/* Database-only lookup.  Returns a newly allocated, whitespace-stripped
 * value (caller frees) or NULL when the option is not present. */
char *
seaf_cfg_manager_get_config (SeafCfgManager *mgr, const char *group, const char *key);

int
seaf_cfg_manager_set_config_int (SeafCfgManager *mgr, const char *group, const char *key, int value);

/* Returns the option as int; -1 when missing or invalid. */
int
seaf_cfg_manager_get_config_int (SeafCfgManager *mgr, const char *group, const char *key);

int
seaf_cfg_manager_set_config_int64 (SeafCfgManager *mgr, const char *group, const char *key, gint64 value);

/* Returns the option as gint64; -1 when missing or invalid. */
gint64
seaf_cfg_manager_get_config_int64 (SeafCfgManager *mgr, const char *group, const char *key);

int
seaf_cfg_manager_set_config_string (SeafCfgManager *mgr, const char *group, const char *key, const char *value);

/* Returns a newly allocated string (caller frees) or NULL. */
char *
seaf_cfg_manager_get_config_string (SeafCfgManager *mgr, const char *group, const char *key);

/* Booleans are stored as the strings "true"/"false". */
int
seaf_cfg_manager_set_config_boolean (SeafCfgManager *mgr, const char *group, const char *key, gboolean value);

gboolean
seaf_cfg_manager_get_config_boolean (SeafCfgManager *mgr, const char *group, const char *key);

int
seaf_cfg_manager_init (SeafCfgManager *mgr);

#endif /* SEAF_CONFIG_MGR_H */
================================================
FILE: common/diff-simple.c
================================================
#include "common.h"
#include "diff-simple.h"
#include "utils.h"
#include "log.h"
DiffEntry *
diff_entry_new (char type, char status, unsigned char *sha1, const char *name)
{
    /* Allocate a diff entry from a raw 20-byte sha1 and a path.
     * The name is copied; sha1 bytes are copied into the entry. */
    DiffEntry *entry = g_new0 (DiffEntry, 1);

    entry->type = type;
    entry->status = status;
    entry->name = g_strdup(name);
    memcpy (entry->sha1, sha1, 20);

    return entry;
}
DiffEntry *
diff_entry_new_from_dirent (char type, char status,
                            SeafDirent *dent, const char *basedir)
{
    /* Build a DiffEntry for `dent`; the entry path is `basedir` (which
     * the tree walk always terminates with '/') plus the dirent name. */
    DiffEntry *de = g_new0 (DiffEntry, 1);
    unsigned char sha1[20];
    char *path;

    /* dent->id is a hex object id; DiffEntry stores the raw 20 bytes. */
    hex_to_rawdata (dent->id, sha1, 20);
    path = g_strconcat (basedir, dent->name, NULL);

    de->type = type;
    de->status = status;
    memcpy (de->sha1, sha1, 20);
    de->name = path;      /* ownership of `path` transfers to `de` */
    de->size = dent->size;

#ifdef SEAFILE_CLIENT
    /* Client builds additionally record mtime/mode/modifier for
     * commit diffs of added/modified entries. */
    if (type == DIFF_TYPE_COMMITS &&
        (status == DIFF_STATUS_ADDED ||
         status == DIFF_STATUS_MODIFIED ||
         status == DIFF_STATUS_DIR_ADDED ||
         status == DIFF_STATUS_DIR_DELETED)) {
        de->mtime = dent->mtime;
        de->mode = dent->mode;
        de->modifier = g_strdup(dent->modifier);
    }
#endif

    return de;
}
void
diff_entry_free (DiffEntry *de)
{
    /* Free a DiffEntry and the strings it owns.  NULL-safe. */
    if (!de)
        return;
    g_free (de->name);
    /* g_free() ignores NULL, so no guard is needed for new_name. */
    g_free (de->new_name);
#ifdef SEAFILE_CLIENT
    g_free (de->modifier);
#endif
    g_free (de);
}
inline static gboolean
dirent_same (SeafDirent *denta, SeafDirent *dentb)
{
    /* Two dirents match when object id, mode and mtime all agree. */
    if (strcmp (denta->id, dentb->id) != 0)
        return FALSE;
    if (denta->mode != dentb->mode)
        return FALSE;
    return denta->mtime == dentb->mtime;
}
static int
diff_files (int n, SeafDirent *dents[], const char *basedir, DiffOptions *opt)
{
    /* Invoke the file callback for the regular-file entries at this
     * level.  Slot positions are preserved; non-files become NULL. */
    SeafDirent *files[3];
    int idx;
    int file_count = 0;

    memset (files, 0, sizeof(files[0]) * n);

    for (idx = 0; idx < n; ++idx) {
        SeafDirent *d = dents[idx];
        if (d != NULL && S_ISREG(d->mode)) {
            files[idx] = d;
            ++file_count;
        }
    }

    if (file_count == 0)
        return 0;

    return opt->file_cb (n, basedir, files, opt->data);
}
static int
diff_trees_recursive (int n, SeafDir *trees[],
const char *basedir, DiffOptions *opt);
static int
diff_directories (int n, SeafDirent *dents[], const char *basedir, DiffOptions *opt)
{
    /* Handle the directory entries at one tree level: call the dir
     * callback, then (if it requests recursion) load each subdir
     * object and recurse.  Returns 0 on success or the first error. */
    SeafDirent *dirs[3];
    int i, n_dirs = 0;
    char *dirname = "";
    int ret;
    SeafDir *sub_dirs[3], *dir;

    memset (dirs, 0, sizeof(dirs[0])*n);

    /* Keep only directory entries, preserving slot positions. */
    for (i = 0; i < n; ++i) {
        if (dents[i] && S_ISDIR(dents[i]->mode)) {
            dirs[i] = dents[i];
            ++n_dirs;
        }
    }

    if (n_dirs == 0)
        return 0;

    gboolean recurse = TRUE;
    ret = opt->dir_cb (n, basedir, dirs, opt->data, &recurse);
    if (ret < 0)
        return ret;

    /* The callback may fold the whole subtree into one entry. */
    if (!recurse)
        return 0;

    memset (sub_dirs, 0, sizeof(sub_dirs[0])*n);
    for (i = 0; i < n; ++i) {
        if (dents[i] != NULL && S_ISDIR(dents[i]->mode)) {
            dir = seaf_fs_manager_get_seafdir (seaf->fs_mgr,
                                               opt->store_id,
                                               opt->version,
                                               dents[i]->id);
            if (!dir) {
                seaf_warning ("Failed to find dir %s:%s.\n",
                              opt->store_id, dents[i]->id);
                ret = -1;
                goto free_sub_dirs;
            }
            sub_dirs[i] = dir;

            /* All non-NULL slots share the same name here (set up by
             * diff_trees_recursive), so any slot's name works. */
            dirname = dents[i]->name;
        }
    }

    /* Sub-level paths are "<basedir><dirname>/". */
    char *new_basedir = g_strconcat (basedir, dirname, "/", NULL);

    ret = diff_trees_recursive (n, sub_dirs, new_basedir, opt);

    g_free (new_basedir);

free_sub_dirs:
    /* seaf_dir_free() tolerates the NULL slots. */
    for (i = 0; i < n; ++i)
        seaf_dir_free (sub_dirs[i]);
    return ret;
}
static int
diff_trees_recursive (int n, SeafDir *trees[],
                      const char *basedir, DiffOptions *opt)
{
    /* Walk n (2 or 3) directory objects in parallel.  Each iteration
     * selects one entry name, fills `dents` with the entries carrying
     * that name from each tree (NULL where absent), diffs the files,
     * and descends into matching sub-directories. */
    GList *ptrs[3];
    SeafDirent *dents[3];
    int i;
    SeafDirent *dent;
    char *first_name;
    gboolean done;
    int ret = 0;

    for (i = 0; i < n; ++i) {
        if (trees[i])
            ptrs[i] = trees[i]->entries;
        else
            ptrs[i] = NULL;
    }

    while (1) {
        first_name = NULL;
        memset (dents, 0, sizeof(dents[0])*n);
        done = TRUE;

        /* Find the "largest" name, assuming dirents are sorted. */
        for (i = 0; i < n; ++i) {
            if (ptrs[i] != NULL) {
                done = FALSE;
                dent = ptrs[i]->data;

                if (!first_name)
                    first_name = dent->name;
                else if (strcmp(dent->name, first_name) > 0)
                    first_name = dent->name;
            }
        }
        /* All entry lists exhausted. */
        if (done)
            break;

        /*
         * Setup dir entries for all names that equal to first_name
         */
        for (i = 0; i < n; ++i) {
            if (ptrs[i] != NULL) {
                dent = ptrs[i]->data;
                if (strcmp(first_name, dent->name) == 0) {
                    dents[i] = dent;
                    ptrs[i] = ptrs[i]->next;
                }
            }
        }

        /* Fast path: entries identical in all trees need no callbacks
         * and no recursion. */
        if (n == 2 && dents[0] && dents[1] && dirent_same(dents[0], dents[1]))
            continue;
        if (n == 3 && dents[0] && dents[1] && dents[2] &&
            dirent_same(dents[0], dents[1]) && dirent_same(dents[0], dents[2]))
            continue;

        /* Diff files of this level. */
        ret = diff_files (n, dents, basedir, opt);
        if (ret < 0)
            return ret;

        /* Recurse into sub level. */
        ret = diff_directories (n, dents, basedir, opt);
        if (ret < 0)
            return ret;
    }

    return ret;
}
int
diff_trees (int n, const char *roots[], DiffOptions *opt)
{
    /* Diff n (2 or 3) fs trees identified by their root object ids.
     * Returns 0 on success, -1 when a root cannot be loaded or the
     * recursive walk fails. */
    SeafDir **trees, *root;
    int i, ret;

    g_return_val_if_fail (n == 2 || n == 3, -1);

    trees = g_new0 (SeafDir *, n);

    for (i = 0; i < n; ++i) {
        root = seaf_fs_manager_get_seafdir (seaf->fs_mgr,
                                            opt->store_id,
                                            opt->version,
                                            roots[i]);
        if (!root) {
            seaf_warning ("Failed to find dir %s:%s.\n", opt->store_id, roots[i]);
            /* Free the trees loaded before the failure; the old code
             * only freed the array and leaked those SeafDir objects. */
            int j;
            for (j = 0; j < i; ++j)
                seaf_dir_free (trees[j]);
            g_free (trees);
            return -1;
        }
        trees[i] = root;
    }

    ret = diff_trees_recursive (n, trees, "", opt);

    for (i = 0; i < n; ++i)
        seaf_dir_free (trees[i]);
    g_free (trees);

    return ret;
}
/* Shared context handed to the two-way/three-way diff callbacks. */
typedef struct DiffData {
    GList **results;        /* output list of DiffEntry, prepended to */
    gboolean fold_dir_diff; /* report added/removed dirs as one entry */
} DiffData;
static int
twoway_diff_files (int n, const char *basedir, SeafDirent *files[], void *vdata)
{
    /* Two-way file callback: slot 0 is the old tree, slot 1 the new. */
    DiffData *data = vdata;
    GList **results = data->results;
    SeafDirent *old_ent = files[0];
    SeafDirent *new_ent = files[1];
    DiffEntry *de;

    if (old_ent == NULL) {
        /* Only in the new tree: added. */
        de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS, DIFF_STATUS_ADDED,
                                         new_ent, basedir);
        *results = g_list_prepend (*results, de);
    } else if (new_ent == NULL) {
        /* Only in the old tree: deleted. */
        de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS, DIFF_STATUS_DELETED,
                                         old_ent, basedir);
        *results = g_list_prepend (*results, de);
    } else if (!dirent_same (old_ent, new_ent)) {
        /* Present in both but changed: modified. */
        de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS, DIFF_STATUS_MODIFIED,
                                         new_ent, basedir);
        de->origin_size = old_ent->size;
        *results = g_list_prepend (*results, de);
    }

    return 0;
}
static int
twoway_diff_dirs (int n, const char *basedir, SeafDirent *dirs[], void *vdata,
                  gboolean *recurse)
{
    /* Two-way dir callback: slot 0 is the old tree, slot 1 the new. */
    DiffData *data = vdata;
    GList **results = data->results;
    SeafDirent *old_dir = dirs[0];
    SeafDirent *new_dir = dirs[1];
    DiffEntry *de;

    if (old_dir == NULL) {
        /* Newly added directory.  When it is empty or folding is
         * requested, report it as one entry and stop descending;
         * otherwise recurse so each file is reported individually. */
        if (strcmp (new_dir->id, EMPTY_SHA1) == 0 || data->fold_dir_diff) {
            de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS, DIFF_STATUS_DIR_ADDED,
                                             new_dir, basedir);
            *results = g_list_prepend (*results, de);
            *recurse = FALSE;
        } else {
            *recurse = TRUE;
        }
        return 0;
    }

    if (new_dir == NULL) {
        /* Deleted directory is always reported; recursion into its
         * old contents depends on the folding flag. */
        de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS,
                                         DIFF_STATUS_DIR_DELETED,
                                         old_dir, basedir);
        *results = g_list_prepend (*results, de);
        *recurse = data->fold_dir_diff ? FALSE : TRUE;
        return 0;
    }

    return 0;
}
int
diff_commits (SeafCommit *commit1, SeafCommit *commit2, GList **results,
              gboolean fold_dir_diff)
{
    /* Two-way diff between the root trees of two commits of the same
     * repo.  *results receives DiffEntry items.  Returns 0 on success,
     * -1 on error. */
    SeafRepo *repo = NULL;
    DiffOptions opt;
    const char *roots[2];
    int ret;

    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, commit1->repo_id);
    if (!repo) {
        seaf_warning ("Failed to get repo %s.\n", commit1->repo_id);
        return -1;
    }

    DiffData data;
    memset (&data, 0, sizeof(data));
    data.results = results;
    data.fold_dir_diff = fold_dir_diff;

    memset (&opt, 0, sizeof(opt));
#ifdef SEAFILE_SERVER
    memcpy (opt.store_id, repo->store_id, 36);
#else
    memcpy (opt.store_id, repo->id, 36);
#endif
    opt.version = repo->version;
    opt.file_cb = twoway_diff_files;
    opt.dir_cb = twoway_diff_dirs;
    opt.data = &data;

#ifdef SEAFILE_SERVER
    /* NOTE(review): client builds never unref the repo here —
     * presumably client repos are not refcounted the same way;
     * confirm against the daemon's repo manager. */
    seaf_repo_unref (repo);
#endif

    roots[0] = commit1->root_id;
    roots[1] = commit2->root_id;

    /* Propagate diff_trees() failure instead of always returning 0,
     * matching diff_merge(). */
    ret = diff_trees (2, roots, &opt);
    diff_resolve_renames (results);

    return ret;
}
int
diff_commit_roots (const char *store_id, int version,
                   const char *root1, const char *root2, GList **results,
                   gboolean fold_dir_diff)
{
    /* Two-way diff between two root fs objects in the given store.
     * Returns 0 on success, -1 when the tree walk fails. */
    DiffOptions opt;
    const char *roots[2];
    int ret;

    DiffData data;
    memset (&data, 0, sizeof(data));
    data.results = results;
    data.fold_dir_diff = fold_dir_diff;

    memset (&opt, 0, sizeof(opt));
    memcpy (opt.store_id, store_id, 36);
    opt.version = version;
    opt.file_cb = twoway_diff_files;
    opt.dir_cb = twoway_diff_dirs;
    opt.data = &data;

    roots[0] = root1;
    roots[1] = root2;

    /* Propagate diff_trees() failure instead of always returning 0,
     * matching diff_merge(). */
    ret = diff_trees (2, roots, &opt);
    diff_resolve_renames (results);

    return ret;
}
static int
threeway_diff_files (int n, const char *basedir, SeafDirent *files[], void *vdata)
{
    /* Three-way file callback for a merge commit: slot 0 is the merged
     * tree, slots 1 and 2 are the two parents (see diff_merge). */
    DiffData *data = vdata;
    SeafDirent *m = files[0];
    SeafDirent *p1 = files[1];
    SeafDirent *p2 = files[2];
    GList **results = data->results;
    DiffEntry *de;

    /* diff m with both p1 and p2. */
    if (m && p1 && p2) {
        /* Differs from both parents: a genuine modification. */
        if (!dirent_same(m, p1) && !dirent_same (m, p2)) {
            de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS, DIFF_STATUS_MODIFIED,
                                             m, basedir);
            *results = g_list_prepend (*results, de);
        }
    } else if (!m && p1 && p2) {
        /* In both parents but gone in the merge: deleted. */
        de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS, DIFF_STATUS_DELETED,
                                         p1, basedir);
        *results = g_list_prepend (*results, de);
    } else if (m && !p1 && p2) {
        /* Only the second parent has it; report if the merge differs. */
        if (!dirent_same (m, p2)) {
            de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS, DIFF_STATUS_MODIFIED,
                                             m, basedir);
            *results = g_list_prepend (*results, de);
        }
    } else if (m && p1 && !p2) {
        /* Only the first parent has it; report if the merge differs. */
        if (!dirent_same (m, p1)) {
            de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS, DIFF_STATUS_MODIFIED,
                                             m, basedir);
            *results = g_list_prepend (*results, de);
        }
    } else if (m && !p1 && !p2) {
        /* In neither parent: newly added by the merge. */
        de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS, DIFF_STATUS_ADDED,
                                         m, basedir);
        *results = g_list_prepend (*results, de);
    }

    /* Nothing to do for:
     * 1. !m && p1 && !p2;
     * 2. !m && !p1 && p2;
     * 3. !m && !p1 && !p2 (should not happen)
     */

    return 0;
}
static int
threeway_diff_dirs (int n, const char *basedir, SeafDirent *dirs[], void *vdata,
                    gboolean *recurse)
{
    /* Directory entries themselves are not reported for a merge diff;
     * always descend so file-level changes are picked up. */
    *recurse = TRUE;
    return 0;
}
int
diff_merge (SeafCommit *merge, GList **results, gboolean fold_dir_diff)
{
    /* Three-way diff of a merge commit against both of its parents.
     * *results must be empty on entry and receives DiffEntry items.
     * Returns 0 on success, -1 on error. */
    SeafRepo *repo = NULL;
    DiffOptions opt;
    const char *roots[3];
    SeafCommit *parent1, *parent2;

    g_return_val_if_fail (*results == NULL, -1);
    g_return_val_if_fail (merge->parent_id != NULL &&
                          merge->second_parent_id != NULL,
                          -1);

    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, merge->repo_id);
    if (!repo) {
        seaf_warning ("Failed to get repo %s.\n", merge->repo_id);
        return -1;
    }

    parent1 = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                              repo->id,
                                              repo->version,
                                              merge->parent_id);
    if (!parent1) {
        seaf_warning ("failed to find commit %s:%s.\n", repo->id, merge->parent_id);
#ifdef SEAFILE_SERVER
        /* The old code leaked the repo reference on this error path. */
        seaf_repo_unref (repo);
#endif
        return -1;
    }

    parent2 = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                              repo->id,
                                              repo->version,
                                              merge->second_parent_id);
    if (!parent2) {
        seaf_warning ("failed to find commit %s:%s.\n",
                      repo->id, merge->second_parent_id);
        seaf_commit_unref (parent1);
#ifdef SEAFILE_SERVER
        /* The old code leaked the repo reference on this error path. */
        seaf_repo_unref (repo);
#endif
        return -1;
    }

    DiffData data;
    memset (&data, 0, sizeof(data));
    data.results = results;
    data.fold_dir_diff = fold_dir_diff;

    memset (&opt, 0, sizeof(opt));
#ifdef SEAFILE_SERVER
    memcpy (opt.store_id, repo->store_id, 36);
#else
    memcpy (opt.store_id, repo->id, 36);
#endif
    opt.version = repo->version;
    opt.file_cb = threeway_diff_files;
    opt.dir_cb = threeway_diff_dirs;
    opt.data = &data;

#ifdef SEAFILE_SERVER
    seaf_repo_unref (repo);
#endif

    roots[0] = merge->root_id;
    roots[1] = parent1->root_id;
    roots[2] = parent2->root_id;

    int ret = diff_trees (3, roots, &opt);
    diff_resolve_renames (results);

    seaf_commit_unref (parent1);
    seaf_commit_unref (parent2);

    return ret;
}
int
diff_merge_roots (const char *store_id, int version,
                  const char *merged_root, const char *p1_root, const char *p2_root,
                  GList **results, gboolean fold_dir_diff)
{
    /* Three-way diff of a merged root against both parent roots.
     * Returns 0 on success, -1 when the tree walk fails. */
    DiffOptions opt;
    const char *roots[3];

    g_return_val_if_fail (*results == NULL, -1);

    DiffData data;
    memset (&data, 0, sizeof(data));
    data.results = results;
    data.fold_dir_diff = fold_dir_diff;

    memset (&opt, 0, sizeof(opt));
    memcpy (opt.store_id, store_id, 36);
    opt.version = version;
    opt.file_cb = threeway_diff_files;
    opt.dir_cb = threeway_diff_dirs;
    opt.data = &data;

    roots[0] = merged_root;
    roots[1] = p1_root;
    roots[2] = p2_root;

    /* Propagate diff_trees() failure instead of always returning 0,
     * matching diff_merge(). */
    int ret = diff_trees (3, roots, &opt);
    diff_resolve_renames (results);

    return ret;
}
/* This function only resolve "strict" rename, i.e. two files must be
* exactly the same.
* Don't detect rename of empty files and empty dirs.
*/
void
diff_resolve_renames (GList **diff_entries)
{
    /* Merge (deleted, added) pairs with identical content (same raw
     * sha1) into single rename entries.  All empty files/dirs share
     * the zero sha1, so those are only considered when there is
     * exactly one deleted and one added empty entry of that kind. */
    GHashTable *deleted_files = NULL, *deleted_dirs = NULL;
    GList *p;
    GList *added = NULL;
    DiffEntry *de;
    unsigned char empty_sha1[20];
    unsigned int deleted_empty_count = 0, deleted_empty_dir_count = 0;
    unsigned int added_empty_count = 0, added_empty_dir_count = 0;
    gboolean check_empty_dir, check_empty_file;

    memset (empty_sha1, 0, 20);

    /* Hash and equal functions for raw sha1. */
    deleted_dirs = g_hash_table_new (ccnet_sha1_hash, ccnet_sha1_equal);
    deleted_files = g_hash_table_new (ccnet_sha1_hash, ccnet_sha1_equal);

    /* Count deleted and added entries of which content is empty. */
    for (p = *diff_entries; p != NULL; p = p->next) {
        de = p->data;
        if (memcmp (de->sha1, empty_sha1, 20) == 0) {
            if (de->status == DIFF_STATUS_DELETED)
                deleted_empty_count++;
            if (de->status == DIFF_STATUS_DIR_DELETED)
                deleted_empty_dir_count++;
            if (de->status == DIFF_STATUS_ADDED)
                added_empty_count++;
            if (de->status == DIFF_STATUS_DIR_ADDED)
                added_empty_dir_count++;
        }
    }

    check_empty_dir = (deleted_empty_dir_count == 1 && added_empty_dir_count == 1);
    check_empty_file = (deleted_empty_count == 1 && added_empty_count == 1);

    /* Collect all "deleted" entries, keyed by raw sha1.  The value is
     * the GList link, so the link can be removed from *diff_entries
     * later without another traversal.
     * NOTE(review): when several deleted entries share a sha1 only the
     * last inserted link is kept — presumably acceptable for "strict"
     * rename detection; confirm. */
    for (p = *diff_entries; p != NULL; p = p->next) {
        de = p->data;
        if (de->status == DIFF_STATUS_DELETED) {
            if (memcmp (de->sha1, empty_sha1, 20) == 0 &&
                check_empty_file == FALSE)
                continue;
            g_hash_table_insert (deleted_files, de->sha1, p);
        }
        if (de->status == DIFF_STATUS_DIR_DELETED) {
            if (memcmp (de->sha1, empty_sha1, 20) == 0 &&
                check_empty_dir == FALSE)
                continue;
            g_hash_table_insert (deleted_dirs, de->sha1, p);
        }
    }

    /* Collect all "added" entries into a separate list. */
    for (p = *diff_entries; p != NULL; p = p->next) {
        de = p->data;
        if (de->status == DIFF_STATUS_ADDED) {
            if (memcmp (de->sha1, empty_sha1, 20) == 0 &&
                check_empty_file == 0)
                continue;
            added = g_list_prepend (added, p);
        }
        if (de->status == DIFF_STATUS_DIR_ADDED) {
            if (memcmp (de->sha1, empty_sha1, 20) == 0 &&
                check_empty_dir == 0)
                continue;
            added = g_list_prepend (added, p);
        }
    }

    /* For each "added" entry, if we find a "deleted" entry with
     * the same content, we find a rename pair.
     */
    p = added;
    while (p != NULL) {
        GList *p_add, *p_del;
        DiffEntry *de_add, *de_del, *de_rename;
        int rename_status;

        p_add = p->data;
        de_add = p_add->data;

        if (de_add->status == DIFF_STATUS_ADDED)
            p_del = g_hash_table_lookup (deleted_files, de_add->sha1);
        else
            p_del = g_hash_table_lookup (deleted_dirs, de_add->sha1);

        if (p_del) {
            de_del = p_del->data;

            if (de_add->status == DIFF_STATUS_DIR_ADDED)
                rename_status = DIFF_STATUS_DIR_RENAMED;
            else
                rename_status = DIFF_STATUS_RENAMED;

            /* Replace the add/delete pair with one rename entry whose
             * name is the old path and new_name the new path. */
            de_rename = diff_entry_new (de_del->type, rename_status,
                                        de_del->sha1, de_del->name);
            de_rename->new_name = g_strdup(de_add->name);

            *diff_entries = g_list_delete_link (*diff_entries, p_add);
            *diff_entries = g_list_delete_link (*diff_entries, p_del);
            *diff_entries = g_list_prepend (*diff_entries, de_rename);

            if (de_del->status == DIFF_STATUS_DIR_DELETED)
                g_hash_table_remove (deleted_dirs, de_add->sha1);
            else
                g_hash_table_remove (deleted_files, de_add->sha1);

            diff_entry_free (de_add);
            diff_entry_free (de_del);
        }

        /* Pop the head of the work list as we go. */
        p = g_list_delete_link (p, p);
    }

    g_hash_table_destroy (deleted_dirs);
    g_hash_table_destroy (deleted_files);
}
static gboolean
is_redundant_empty_dir (DiffEntry *de_dir, DiffEntry *de_file)
{
    /* A dir-added entry is redundant when paired with a file deleted
     * under it, and a dir-deleted entry when paired with a file added
     * under it.  The file must be strictly inside the directory: the
     * dir name is a proper prefix of the file path AND is followed by
     * a path separator.  The old check omitted the separator test, so
     * a directory "abc" wrongly matched a sibling entry "abcd". */
    int dir_len;

    if ((de_dir->status == DIFF_STATUS_DIR_ADDED &&
         de_file->status == DIFF_STATUS_DELETED) ||
        (de_dir->status == DIFF_STATUS_DIR_DELETED &&
         de_file->status == DIFF_STATUS_ADDED))
    {
        dir_len = strlen (de_dir->name);
        if (strlen (de_file->name) > dir_len &&
            strncmp (de_dir->name, de_file->name, dir_len) == 0 &&
            de_file->name[dir_len] == '/')
            return TRUE;
    }

    return FALSE;
}
/*
* An empty dir entry may be added by deleting all the files under it.
* Similarly, an empty dir entry may be deleted by adding some file in it.
* In both cases, we don't want to include the empty dir entry in the
* diff results.
*/
void
diff_resolve_empty_dirs (GList **diff_entries)
{
    /* Remove dir-added/dir-deleted entries that are implied by file
     * changes underneath them (see the comment above). */
    GList *empty_dirs = NULL;
    GList *p, *dir, *file;
    DiffEntry *de, *de_dir, *de_file;

    /* Collect the links of all dir-added/dir-deleted entries. */
    for (p = *diff_entries; p != NULL; p = p->next) {
        de = p->data;
        if (de->status == DIFF_STATUS_DIR_ADDED ||
            de->status == DIFF_STATUS_DIR_DELETED)
            empty_dirs = g_list_prepend (empty_dirs, p);
    }

    /* Drop a dir entry as soon as one file entry proves it redundant.
     * NOTE(review): g_list_delete_link() removes the link but does not
     * free the DiffEntry it held — looks like a small leak; confirm. */
    for (dir = empty_dirs; dir != NULL; dir = dir->next) {
        de_dir = ((GList *)dir->data)->data;

        for (file = *diff_entries; file != NULL; file = file->next) {
            de_file = file->data;
            if (is_redundant_empty_dir (de_dir, de_file)) {
                *diff_entries = g_list_delete_link (*diff_entries, dir->data);
                break;
            }
        }
    }

    g_list_free (empty_dirs);
}
int diff_unmerged_state(int mask)
{
    /* Map an unmerged-stage bitmask (presumably index stage bits — the
     * low bit is discarded before dispatch) to a STATUS_UNMERGED_*
     * value.  Unknown patterns are logged and mapped to 0. */
    mask >>= 1;

    switch (mask) {
    case 2:
        return STATUS_UNMERGED_DFC_I_ADDED_FILE;
    case 3:
        return STATUS_UNMERGED_OTHERS_REMOVED;
    case 4:
        return STATUS_UNMERGED_DFC_OTHERS_ADDED_FILE;
    case 5:
        return STATUS_UNMERGED_I_REMOVED;
    case 6:
        return STATUS_UNMERGED_BOTH_ADDED;
    case 7:
        return STATUS_UNMERGED_BOTH_CHANGED;
    default:
        seaf_warning ("Unexpected unmerged case\n");
        return 0;
    }
}
char *
format_diff_results(GList *results)
{
    /* Serialize a diff result list, one entry per line:
     *   "<type> <status> <unmerged> <namelen> <name>[ <newlen> <newname>]"
     * Returns a newly allocated string; caller frees. */
    GList *ptr;
    GString *fmt_status;
    DiffEntry *de;

    fmt_status = g_string_new("");

    for (ptr = results; ptr; ptr = ptr->next) {
        de = ptr->data;
        /* Cast strlen() to unsigned to match the %u conversion; the
         * old code passed an int for %u, a format/argument mismatch. */
        if (de->status != DIFF_STATUS_RENAMED)
            g_string_append_printf(fmt_status, "%c %c %d %u %s\n",
                                   de->type, de->status, de->unmerge_state,
                                   (unsigned)strlen(de->name), de->name);
        else
            g_string_append_printf(fmt_status, "%c %c %d %u %s %u %s\n",
                                   de->type, de->status, de->unmerge_state,
                                   (unsigned)strlen(de->name), de->name,
                                   (unsigned)strlen(de->new_name), de->new_name);
    }

    return g_string_free(fmt_status, FALSE);
}
inline static char *
get_basename (char *path)
{
    /* Return the component after the last '/', or the whole path when
     * there is no separator.  No allocation; points into `path`. */
    char *sep = strrchr (path, '/');
    return sep ? sep + 1 : path;
}
char *
diff_results_to_description (GList *results)
{
    /* Produce a human-readable commit description from a diff result
     * list: each category names the basename of its first entry and
     * summarizes the remaining count.  Returns a newly allocated
     * string (caller frees), or NULL for an empty list. */
    GList *p;
    DiffEntry *de;
    char *add_mod_file = NULL, *removed_file = NULL;
    char *renamed_file = NULL, *renamed_dir = NULL;
    char *new_dir = NULL, *removed_dir = NULL;
    int n_add_mod = 0, n_removed = 0, n_renamed = 0;
    int n_new_dir = 0, n_removed_dir = 0, n_renamed_dir = 0;
    GString *desc;

    if (results == NULL)
        return NULL;

    /* First pass: count each category and remember the first name.
     * Added and modified files share one bucket. */
    for (p = results; p != NULL; p = p->next) {
        de = p->data;
        switch (de->status) {
        case DIFF_STATUS_ADDED:
            if (n_add_mod == 0)
                add_mod_file = get_basename(de->name);
            n_add_mod++;
            break;
        case DIFF_STATUS_DELETED:
            if (n_removed == 0)
                removed_file = get_basename(de->name);
            n_removed++;
            break;
        case DIFF_STATUS_RENAMED:
            if (n_renamed == 0)
                renamed_file = get_basename(de->name);
            n_renamed++;
            break;
        case DIFF_STATUS_MODIFIED:
            if (n_add_mod == 0)
                add_mod_file = get_basename(de->name);
            n_add_mod++;
            break;
        case DIFF_STATUS_DIR_ADDED:
            if (n_new_dir == 0)
                new_dir = get_basename(de->name);
            n_new_dir++;
            break;
        case DIFF_STATUS_DIR_DELETED:
            if (n_removed_dir == 0)
                removed_dir = get_basename(de->name);
            n_removed_dir++;
            break;
        case DIFF_STATUS_DIR_RENAMED:
            if (n_renamed_dir == 0)
                renamed_dir = get_basename(de->name);
            n_renamed_dir++;
            break;
        }
    }

    /* Second pass: one summary line per non-empty category. */
    desc = g_string_new ("");

    if (n_add_mod == 1)
        g_string_append_printf (desc, "Added or modified \"%s\".\n", add_mod_file);
    else if (n_add_mod > 1)
        g_string_append_printf (desc, "Added or modified \"%s\" and %d more files.\n",
                                add_mod_file, n_add_mod - 1);

    if (n_removed == 1)
        g_string_append_printf (desc, "Deleted \"%s\".\n", removed_file);
    else if (n_removed > 1)
        g_string_append_printf (desc, "Deleted \"%s\" and %d more files.\n",
                                removed_file, n_removed - 1);

    if (n_renamed == 1)
        g_string_append_printf (desc, "Renamed \"%s\".\n", renamed_file);
    else if (n_renamed > 1)
        g_string_append_printf (desc, "Renamed \"%s\" and %d more files.\n",
                                renamed_file, n_renamed - 1);

    if (n_new_dir == 1)
        g_string_append_printf (desc, "Added directory \"%s\".\n", new_dir);
    else if (n_new_dir > 1)
        g_string_append_printf (desc, "Added \"%s\" and %d more directories.\n",
                                new_dir, n_new_dir - 1);

    if (n_removed_dir == 1)
        g_string_append_printf (desc, "Removed directory \"%s\".\n", removed_dir);
    else if (n_removed_dir > 1)
        g_string_append_printf (desc, "Removed \"%s\" and %d more directories.\n",
                                removed_dir, n_removed_dir - 1);

    if (n_renamed_dir == 1)
        g_string_append_printf (desc, "Renamed directory \"%s\".\n", renamed_dir);
    else if (n_renamed_dir > 1)
        g_string_append_printf (desc, "Renamed \"%s\" and %d more directories.\n",
                                renamed_dir, n_renamed_dir - 1);

    return g_string_free (desc, FALSE);
}
================================================
FILE: common/diff-simple.h
================================================
#ifndef DIFF_SIMPLE_H
#define DIFF_SIMPLE_H
#include
#include "seafile-session.h"
#define DIFF_TYPE_WORKTREE 'W' /* diff from index to worktree */
#define DIFF_TYPE_INDEX 'I' /* diff from commit to index */
#define DIFF_TYPE_COMMITS 'C' /* diff between two commits*/
#define DIFF_STATUS_ADDED 'A'
#define DIFF_STATUS_DELETED 'D'
#define DIFF_STATUS_MODIFIED 'M'
#define DIFF_STATUS_RENAMED 'R'
#define DIFF_STATUS_UNMERGED 'U'
#define DIFF_STATUS_DIR_ADDED 'B'
#define DIFF_STATUS_DIR_DELETED 'C'
#define DIFF_STATUS_DIR_RENAMED 'E'
/* Unmerged-entry classifications produced by diff_unmerged_state(). */
enum {
    STATUS_UNMERGED_NONE,
    /* I and others modified the same file differently. */
    STATUS_UNMERGED_BOTH_CHANGED,
    /* I and others created the same file with different contents. */
    STATUS_UNMERGED_BOTH_ADDED,
    /* I removed a file while others modified it. */
    STATUS_UNMERGED_I_REMOVED,
    /* Others removed a file while I modified it. */
    STATUS_UNMERGED_OTHERS_REMOVED,
    /* I replace a directory with a file while others modified files under the directory. */
    STATUS_UNMERGED_DFC_I_ADDED_FILE,
    /* Others replace a directory with a file while I modified files under the directory. */
    STATUS_UNMERGED_DFC_OTHERS_ADDED_FILE,
};

/* One entry in a diff result list. */
typedef struct DiffEntry {
    char type;              /* one of the DIFF_TYPE_* characters */
    char status;            /* one of the DIFF_STATUS_* characters */
    int unmerge_state;      /* a STATUS_UNMERGED_* value */
    unsigned char sha1[20]; /* used for resolve rename */
    char *name;
    char *new_name;         /* only used in rename. */
    gint64 size;
    gint64 origin_size;     /* only used in modified */
} DiffEntry;
DiffEntry *
diff_entry_new (char type, char status, unsigned char *sha1, const char *name);
void
diff_entry_free (DiffEntry *de);
/*
* @fold_dir_diff: if TRUE, only the top level directory will be included
* in the diff result if a directory with files is added or removed.
* Otherwise all the files in the direcotory will be recursively
* included in the diff result.
*/
int
diff_commits (SeafCommit *commit1, SeafCommit *commit2, GList **results,
gboolean fold_dir_diff);
int
diff_commit_roots (const char *store_id, int version,
const char *root1, const char *root2, GList **results,
gboolean fold_dir_diff);
int
diff_merge (SeafCommit *merge, GList **results, gboolean fold_dir_diff);
int
diff_merge_roots (const char *store_id, int version,
const char *merged_root, const char *p1_root, const char *p2_root,
GList **results, gboolean fold_dir_diff);
void
diff_resolve_renames (GList **diff_entries);
void
diff_resolve_empty_dirs (GList **diff_entries);
int
diff_unmerged_state(int mask);
char *
format_diff_results(GList *results);
char *
diff_results_to_description (GList *results);
/* Per-level file callback: `files` has `n` slots aligned with the
 * compared trees; absent entries are NULL.  Return <0 to abort. */
typedef int (*DiffFileCB) (int n,
                           const char *basedir,
                           SeafDirent *files[],
                           void *data);

/* Per-level dir callback; set *recurse to FALSE to skip descending
 * into the directory.  Return <0 to abort. */
typedef int (*DiffDirCB) (int n,
                          const char *basedir,
                          SeafDirent *dirs[],
                          void *data,
                          gboolean *recurse);

typedef struct DiffOptions {
    char store_id[37];  /* 36-char store/repo id plus NUL */
    int version;
    DiffFileCB file_cb;
    DiffDirCB dir_cb;
    void *data;         /* opaque pointer passed to the callbacks */
} DiffOptions;
int
diff_trees (int n, const char *roots[], DiffOptions *opt);
#endif
================================================
FILE: common/fs-mgr.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef _GNU_SOURECE
#define _GNU_SOURCE
char *strcasestr (const char *haystack, const char *needle);
#undef _GNU_SOURCE
#endif
#include "common.h"
#include
#include
#include
#ifndef WIN32
#include
#endif
#include
#include
#include "seafile-session.h"
#include "seafile-error.h"
#include "fs-mgr.h"
#include "block-mgr.h"
#include "utils.h"
#include "seaf-utils.h"
#define DEBUG_FLAG SEAFILE_DEBUG_OTHER
#include "log.h"
#include "../common/seafile-crypt.h"
#ifndef SEAFILE_SERVER
#include "../daemon/vc-utils.h"
#include "vc-common.h"
#endif /* SEAFILE_SERVER */
#include "db.h"
#define SEAF_TMP_EXT "~"
struct _SeafFSManagerPriv {
    /* GHashTable *seafile_cache; */
    GHashTable *bl_cache;   /* presumably a block-list cache — confirm */
};

/* On-disk layout of a seafile (file) object: fixed header followed by
 * the file's block ids as trailing variable-length data. */
typedef struct SeafileOndisk {
    guint32 type;
    guint64 file_size;
    unsigned char block_ids[0];
} __attribute__((__packed__)) SeafileOndisk;

/* On-disk layout of one directory entry. */
typedef struct DirentOndisk {
    guint32 mode;
    char id[40];        /* object id (40 chars, no NUL terminator) */
    guint32 name_len;
    char name[0];       /* name bytes follow, name_len of them */
} __attribute__((__packed__)) DirentOndisk;

/* On-disk layout of a seafdir (directory) object; `dirents` holds the
 * packed entries — presumably DirentOndisk records; confirm. */
typedef struct SeafdirOndisk {
    guint32 type;
    char dirents[0];
} __attribute__((__packed__)) SeafdirOndisk;
#ifndef SEAFILE_SERVER
uint32_t
calculate_chunk_size (uint64_t total_size);
static int
write_seafile (SeafFSManager *fs_mgr,
const char *repo_id, int version,
CDCFileDescriptor *cdc,
unsigned char *obj_sha1);
#endif /* SEAFILE_SERVER */
SeafFSManager *
seaf_fs_manager_new (SeafileSession *seaf,
                     const char *seaf_dir)
{
    /* Create the fs manager and its backing "fs" object store.
     * NOTE(review): seaf_dir is unused here — presumably kept for API
     * compatibility; confirm with callers. */
    SeafFSManager *mgr = g_new0 (SeafFSManager, 1);

    mgr->seaf = seaf;
    mgr->obj_store = seaf_obj_store_new (seaf, "fs");
    if (mgr->obj_store == NULL) {
        g_free (mgr);
        return NULL;
    }

    mgr->priv = g_new0(SeafFSManagerPriv, 1);

    return mgr;
}
int
seaf_fs_manager_init (SeafFSManager *mgr)
{
    /* Initialize the underlying "fs" object store.
     * Returns 0 on success, -1 on failure. */
    int rc = seaf_obj_store_init (mgr->obj_store);
    if (rc < 0) {
        seaf_warning ("[fs mgr] Failed to init fs object store.\n");
        return -1;
    }
    return 0;
}
#ifndef SEAFILE_SERVER
static int
checkout_block (const char *repo_id,
                int version,
                const char *block_id,
                int wfd,
                SeafileCrypt *crypt)
{
    /* Read one block, optionally decrypt it, and append the plaintext
     * to the open file descriptor wfd.  Returns 0 on success, -1 on
     * any error. */
    SeafBlockManager *block_mgr = seaf->block_mgr;
    BlockHandle *handle;
    BlockMetadata *bmd = NULL;
    char *dec_out = NULL;
    int dec_out_len = -1;
    char *blk_content = NULL;

    handle = seaf_block_manager_open_block (block_mgr,
                                            repo_id, version,
                                            block_id, BLOCK_READ);
    if (!handle) {
        seaf_warning ("Failed to open block %s\n", block_id);
        return -1;
    }

    /* first stat the block to get its size */
    bmd = seaf_block_manager_stat_block_by_handle (block_mgr, handle);
    if (!bmd) {
        seaf_warning ("can't stat block %s.\n", block_id);
        goto checkout_blk_error;
    }

    /* empty file, skip it */
    if (bmd->size == 0) {
        /* The old code leaked bmd on this early return. */
        g_free (bmd);
        seaf_block_manager_close_block (block_mgr, handle);
        seaf_block_manager_block_handle_free (block_mgr, handle);
        return 0;
    }

    blk_content = malloc (bmd->size * sizeof(char));
    if (!blk_content) {
        /* The old code did not check the malloc() result. */
        seaf_warning ("Out of memory when reading block %s.\n", block_id);
        goto checkout_blk_error;
    }

    /* read the block to prepare decryption */
    if (seaf_block_manager_read_block (block_mgr, handle,
                                       blk_content, bmd->size) != bmd->size) {
        seaf_warning ("Error when reading from block %s.\n", block_id);
        goto checkout_blk_error;
    }

    if (crypt != NULL) {
        /* An encrypted block size must be a multiple of
           ENCRYPT_BLK_SIZE
        */
        if (bmd->size % ENCRYPT_BLK_SIZE != 0) {
            seaf_warning ("Error: An invalid encrypted block, %s \n", block_id);
            goto checkout_blk_error;
        }

        /* decrypt the block */
        int ret = seafile_decrypt (&dec_out,
                                   &dec_out_len,
                                   blk_content,
                                   bmd->size,
                                   crypt);
        if (ret != 0) {
            seaf_warning ("Decryt block %s failed. \n", block_id);
            goto checkout_blk_error;
        }

        /* write the decrypted content */
        if (writen (wfd, dec_out, dec_out_len) != dec_out_len) {
            seaf_warning ("Failed to write the decryted block %s.\n",
                          block_id);
            goto checkout_blk_error;
        }
        g_free (dec_out);
        dec_out = NULL;
    } else {
        /* not an encrypted block */
        if (writen(wfd, blk_content, bmd->size) != bmd->size) {
            seaf_warning ("Failed to write the decryted block %s.\n",
                          block_id);
            goto checkout_blk_error;
        }
    }

    /* blk_content came from malloc(), so pair it with free(); the old
     * code mixed malloc() with g_free(). */
    free (blk_content);
    g_free (bmd);
    seaf_block_manager_close_block (block_mgr, handle);
    seaf_block_manager_block_handle_free (block_mgr, handle);
    return 0;

checkout_blk_error:
    /* free(NULL)/g_free(NULL) are no-ops, so no guards needed. */
    free (blk_content);
    g_free (dec_out);
    g_free (bmd);
    seaf_block_manager_close_block (block_mgr, handle);
    seaf_block_manager_block_handle_free (block_mgr, handle);
    return -1;
}
/*
 * Check out file object @file_id to @file_path on the local filesystem.
 *
 * The content is first written to a temp file (@file_path + SEAF_TMP_EXT)
 * and then renamed into place, so a failed checkout never leaves a
 * truncated file at the destination. If the rename fails, or
 * @force_conflict is set, a conflict file is created and *conflicted is
 * set to TRUE.
 *
 * @crypt: decryption context for encrypted repos, or NULL.
 * @email: suffix for the conflict file name; if NULL, the repo's email
 *         is looked up via the repo manager (see XXX note below).
 *
 * Returns 0 on success, -1 on error.
 */
int
seaf_fs_manager_checkout_file (SeafFSManager *mgr,
                               const char *repo_id,
                               int version,
                               const char *file_id,
                               const char *file_path,
                               guint32 mode,
                               guint64 mtime,
                               SeafileCrypt *crypt,
                               const char *in_repo_path,
                               const char *conflict_head_id,
                               gboolean force_conflict,
                               gboolean *conflicted,
                               const char *email)
{
    Seafile *seafile;
    char *blk_id;
    int wfd;
    int i;
    char *tmp_path;
    char *conflict_path;

    *conflicted = FALSE;

    seafile = seaf_fs_manager_get_seafile (mgr, repo_id, version, file_id);
    if (!seafile) {
        seaf_warning ("File %s does not exist.\n", file_id);
        return -1;
    }

    tmp_path = g_strconcat (file_path, SEAF_TMP_EXT, NULL);

    /* Preserve the executable bit from the repo mode; the process umask
     * trims the rest (S_IFMT bits are masked off below). */
    mode_t rmode = mode & 0100 ? 0777 : 0666;

    wfd = seaf_util_create (tmp_path, O_WRONLY | O_TRUNC | O_CREAT | O_BINARY,
                            rmode & ~S_IFMT);
    if (wfd < 0) {
        seaf_warning ("Failed to open file %s for checkout: %s.\n",
                      tmp_path, strerror(errno));
        goto bad;
    }

    /* Stream each block (decrypted if needed) into the temp file. */
    for (i = 0; i < seafile->n_blocks; ++i) {
        blk_id = seafile->blk_sha1s[i];
        if (checkout_block (repo_id, version, blk_id, wfd, crypt) < 0)
            goto bad;
    }

    close (wfd);
    wfd = -1;

    if (force_conflict || seaf_util_rename (tmp_path, file_path) < 0) {
        *conflicted = TRUE;

        /* XXX
         * In new syncing protocol and http sync, files are checked out before
         * the repo is created. So we can't get user email from repo at this point.
         * So a email parameter is needed.
         * For old syncing protocol, repo always exists when files are checked out.
         * This is a quick and dirty hack. A cleaner solution should modifiy the
         * code of old syncing protocol to pass in email too. But I don't want to
         * spend more time on the nearly obsoleted code.
         */
        const char *suffix = NULL;
        if (email) {
            suffix = email;
        } else {
            SeafRepo *repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
            if (!repo)
                goto bad;
            /* Fix: use the repo's email. The original code assigned the
             * (known-NULL) email parameter here, which defeated the repo
             * lookup and produced a conflict name with no user suffix. */
            suffix = repo->email;
        }

        conflict_path = gen_conflict_path (file_path, suffix, (gint64)time(NULL));

        seaf_warning ("Cannot update %s, creating conflict file %s.\n",
                      file_path, conflict_path);

        /* First try to rename the local version to a conflict file,
         * this will preserve the version from the server.
         * If this fails, fall back to checking out the server version
         * to the conflict file.
         */
        if (seaf_util_rename (file_path, conflict_path) == 0) {
            if (seaf_util_rename (tmp_path, file_path) < 0) {
                g_free (conflict_path);
                goto bad;
            }
        } else {
            g_free (conflict_path);
            conflict_path = gen_conflict_path_wrapper (repo_id, version,
                                                       conflict_head_id, in_repo_path,
                                                       file_path);
            if (!conflict_path)
                goto bad;
            if (seaf_util_rename (tmp_path, conflict_path) < 0) {
                g_free (conflict_path);
                goto bad;
            }
        }

        g_free (conflict_path);
    }

    if (mtime > 0) {
        /*
         * Set the checked out file mtime to what it has to be.
         */
        if (seaf_set_file_time (file_path, mtime) < 0) {
            seaf_warning ("Failed to set mtime for %s.\n", file_path);
        }
    }

    g_free (tmp_path);
    seafile_unref (seafile);
    return 0;

bad:
    if (wfd >= 0)
        close (wfd);
    /* Remove the tmp file if it still exists, in case that rename fails. */
    seaf_util_unlink (tmp_path);
    g_free (tmp_path);
    seafile_unref (seafile);
    return -1;
}
#endif /* SEAFILE_SERVER */
/*
 * Serialize a CDC chunking result into the version-0 (legacy binary)
 * seafile object layout: a SeafileOndisk header followed by the raw
 * 20-byte block ids. The hex object id (the CDC file checksum) is
 * written to @seafile_id, which must hold at least 41 bytes.
 * The caller owns the returned buffer.
 */
static void *
create_seafile_v0 (CDCFileDescriptor *cdc, int *ondisk_size, char *seafile_id)
{
    SeafileOndisk *ondisk;

    rawdata_to_hex (cdc->file_sum, seafile_id, 20);

    *ondisk_size = sizeof(SeafileOndisk) + cdc->block_nr * 20;
    ondisk = (SeafileOndisk *)g_new0 (char, *ondisk_size);

    /* Multi-byte header fields are stored big-endian on disk. */
    ondisk->type = htonl(SEAF_METADATA_TYPE_FILE);
    ondisk->file_size = hton64 (cdc->file_size);

    memcpy (ondisk->block_ids, cdc->blk_sha1s, cdc->block_nr * 20);

    return ondisk;
}
/*
 * Serialize a CDC chunking result as the JSON (repo version > 0)
 * seafile object. The object id, written to @seafile_id (>= 41 bytes),
 * is the sha1 of the serialized JSON itself. The returned buffer comes
 * from json_dumps() and must be released with free(), not g_free().
 */
static void *
create_seafile_json (int repo_version,
                     CDCFileDescriptor *cdc,
                     int *ondisk_size,
                     char *seafile_id)
{
    json_t *object, *block_id_array;

    object = json_object ();
    json_object_set_int_member (object, "type", SEAF_METADATA_TYPE_FILE);
    json_object_set_int_member (object, "version",
                                seafile_version_from_repo_version(repo_version));
    json_object_set_int_member (object, "size", cdc->file_size);

    /* Convert each raw 20-byte block sha1 to hex and collect into a
     * JSON array under "block_ids". */
    block_id_array = json_array ();
    int i;
    uint8_t *ptr = cdc->blk_sha1s;
    char block_id[41];
    for (i = 0; i < cdc->block_nr; ++i) {
        rawdata_to_hex (ptr, block_id, 20);
        json_array_append_new (block_id_array, json_string(block_id));
        ptr += 20;
    }
    json_object_set_new (object, "block_ids", block_id_array);

    /* JSON_SORT_KEYS keeps serialization canonical so the sha1 below is
     * stable for identical content. */
    char *data = json_dumps (object, JSON_SORT_KEYS);
    *ondisk_size = strlen(data);

    /* The seafile object id is sha1 hash of the json object. */
    unsigned char sha1[20];
    calculate_sha1 (sha1, data, *ondisk_size);
    rawdata_to_hex (sha1, seafile_id, 20);

    json_decref (object);
    return data;
}
/*
 * Compute the object id for a CDC result without storing anything.
 * The id is the sha1 of the canonical (key-sorted) JSON serialization
 * of the file object; the raw 20-byte digest is written to @file_id_sha1.
 */
void
seaf_fs_manager_calculate_seafile_id_json (int repo_version,
                                           CDCFileDescriptor *cdc,
                                           guint8 *file_id_sha1)
{
    json_t *obj = json_object ();
    json_t *blk_ids = json_array ();
    char hex_id[41];
    int idx;

    json_object_set_int_member (obj, "type", SEAF_METADATA_TYPE_FILE);
    json_object_set_int_member (obj, "version",
                                seafile_version_from_repo_version(repo_version));
    json_object_set_int_member (obj, "size", cdc->file_size);

    /* Hex-encode each raw block sha1 into the "block_ids" array. */
    for (idx = 0; idx < cdc->block_nr; ++idx) {
        rawdata_to_hex (cdc->blk_sha1s + idx * 20, hex_id, 20);
        json_array_append_new (blk_ids, json_string(hex_id));
    }
    json_object_set_new (obj, "block_ids", blk_ids);

    /* JSON_SORT_KEYS makes the dump canonical, so the id is stable. */
    char *serialized = json_dumps (obj, JSON_SORT_KEYS);
    calculate_sha1 (file_id_sha1, serialized, strlen (serialized));

    json_decref (obj);
    free (serialized);
}
/*
 * Serialize @cdc into a seafile object and store it in the object store.
 * On success the raw 20-byte object id is written to @obj_sha1.
 * Version > 0 objects are JSON and compressed before storage; version 0
 * objects use the legacy binary layout, uncompressed.
 * Returns 0 on success (including when the object already exists), -1 on error.
 */
static int
write_seafile (SeafFSManager *fs_mgr,
               const char *repo_id,
               int version,
               CDCFileDescriptor *cdc,
               unsigned char *obj_sha1)
{
    int ret = 0;
    char seafile_id[41];
    void *ondisk;
    int ondisk_size;

    if (version > 0) {
        /* create_seafile_json returns a json_dumps buffer: release with
         * free(), not g_free(). */
        ondisk = create_seafile_json (version, cdc, &ondisk_size, seafile_id);

        guint8 *compressed;
        int outlen;
        /* Dedup: identical content produces an identical id. */
        if (seaf_obj_store_obj_exists (fs_mgr->obj_store, repo_id, version, seafile_id)) {
            ret = 0;
            free (ondisk);
            goto out;
        }
        if (seaf_compress (ondisk, ondisk_size, &compressed, &outlen) < 0) {
            seaf_warning ("Failed to compress seafile obj %s:%s.\n",
                          repo_id, seafile_id);
            ret = -1;
            free (ondisk);
            goto out;
        }
        if (seaf_obj_store_write_obj (fs_mgr->obj_store, repo_id, version, seafile_id,
                                      compressed, outlen, FALSE) < 0)
            ret = -1;
        g_free (compressed);
        free (ondisk);
    } else {
        /* Legacy v0 buffer was g_new0-allocated: release with g_free(). */
        ondisk = create_seafile_v0 (cdc, &ondisk_size, seafile_id);
        if (seaf_obj_store_obj_exists (fs_mgr->obj_store, repo_id, version, seafile_id)) {
            ret = 0;
            g_free (ondisk);
            goto out;
        }
        if (seaf_obj_store_write_obj (fs_mgr->obj_store, repo_id, version, seafile_id,
                                      ondisk, ondisk_size, FALSE) < 0)
            ret = -1;
        g_free (ondisk);
    }

out:
    if (ret == 0)
        hex_to_rawdata (seafile_id, obj_sha1, 20);
    return ret;
}
/*
 * Choose the fixed block size for a file of @total_size bytes.
 * Larger files get larger blocks (1 MiB up to 8 MiB) so the number of
 * blocks per file stays bounded.
 */
uint32_t
calculate_chunk_size (uint64_t total_size)
{
    static const uint64_t MiB = 1048576;
    static const uint64_t GiB = 1073741824;

    uint32_t chunk_size = 1 * MiB;

    /* Each threshold overrides the previous one; the last match wins. */
    if (total_size >= 2 * GiB)
        chunk_size = 2 * MiB;
    if (total_size >= 4 * GiB)
        chunk_size = 4 * MiB;
    if (total_size >= 8 * GiB)
        chunk_size = 8 * MiB;

    return chunk_size;
}
/*
 * Write one chunk into block storage under its sha1-derived name.
 * A no-op when the block already exists (content-addressed dedup).
 * Returns 0 on success, -1 on error.
 */
static int
do_write_chunk (const char *repo_id, int version,
                uint8_t *checksum, const char *buf, int len)
{
    SeafBlockManager *blk_mgr = seaf->block_mgr;
    char chksum_str[41];
    BlockHandle *handle;
    int n;

    rawdata_to_hex (checksum, chksum_str, 20);

    /* Don't write if the block already exists. */
    if (seaf_block_manager_block_exists (seaf->block_mgr,
                                         repo_id, version,
                                         chksum_str))
        return 0;

    handle = seaf_block_manager_open_block (blk_mgr,
                                            repo_id, version,
                                            chksum_str, BLOCK_WRITE);
    if (!handle) {
        seaf_warning ("Failed to open block %s.\n", chksum_str);
        return -1;
    }

    n = seaf_block_manager_write_block (blk_mgr, handle, buf, len);
    if (n < 0) {
        seaf_warning ("Failed to write chunk %s.\n", chksum_str);
        seaf_block_manager_close_block (blk_mgr, handle);
        seaf_block_manager_block_handle_free (blk_mgr, handle);
        return -1;
    }

    /* Close first, then commit; the handle must be freed on every path.
     * NOTE(review): close and commit semantics come from the block
     * backend — the handle stays valid for commit after close. */
    if (seaf_block_manager_close_block (blk_mgr, handle) < 0) {
        seaf_warning ("failed to close block %s.\n", chksum_str);
        seaf_block_manager_block_handle_free (blk_mgr, handle);
        return -1;
    }

    if (seaf_block_manager_commit_block (blk_mgr, handle) < 0) {
        seaf_warning ("failed to commit chunk %s.\n", chksum_str);
        seaf_block_manager_block_handle_free (blk_mgr, handle);
        return -1;
    }

    seaf_block_manager_block_handle_free (blk_mgr, handle);
    return 0;
}
/* write the chunk and store its checksum */
int
seafile_write_chunk (const char *repo_id,
int version,
CDCDescriptor *chunk,
SeafileCrypt *crypt,
uint8_t *checksum,
gboolean write_data)
{
SHA_CTX ctx;
int ret = 0;
/* Encrypt before write to disk if needed, and we don't encrypt
* empty files. */
if (crypt != NULL && chunk->len) {
char *encrypted_buf = NULL; /* encrypted output */
int enc_len = -1; /* encrypted length */
ret = seafile_encrypt (&encrypted_buf, /* output */
&enc_len, /* output len */
chunk->block_buf, /* input */
chunk->len, /* input len */
crypt);
if (ret != 0) {
seaf_warning ("Error: failed to encrypt block\n");
return -1;
}
SHA1_Init (&ctx);
SHA1_Update (&ctx, encrypted_buf, enc_len);
SHA1_Final (checksum, &ctx);
if (write_data)
ret = do_write_chunk (repo_id, version, checksum, encrypted_buf, enc_len);
g_free (encrypted_buf);
} else {
/* not a encrypted repo, go ahead */
SHA1_Init (&ctx);
SHA1_Update (&ctx, chunk->block_buf, chunk->len);
SHA1_Final (checksum, &ctx);
if (write_data)
ret = do_write_chunk (repo_id, version, checksum, chunk->block_buf, chunk->len);
}
return ret;
}
/*
 * Initialize a zeroed CDC descriptor for an empty file: no blocks, zero
 * size, all-zero checksum. Callers handle the empty-file object id
 * themselves.
 */
static void
create_cdc_for_empty_file (CDCFileDescriptor *cdc)
{
    memset (cdc, 0, sizeof(CDCFileDescriptor));
}
#if defined SEAFILE_SERVER && defined FULL_FEATURE
#define FIXED_BLOCK_SIZE (1<<20)
/* Shared read-only context handed to every chunking_worker invocation. */
typedef struct ChunkingData {
    const char *repo_id;
    int version;
    const char *file_path;       /* file being chunked; each worker re-opens it */
    SeafileCrypt *crypt;         /* NULL for unencrypted repos */
    guint8 *blk_sha1s;           /* output: raw sha1s indexed by block number */
    GAsyncQueue *finished_tasks; /* workers push completed chunks here */
} ChunkingData;
/*
 * Thread-pool worker: read one fixed-size chunk of the file, write it to
 * block storage (encrypting if needed), and record its sha1 in the shared
 * result array. The finished chunk descriptor is always pushed to
 * data->finished_tasks so the producer can count completions; the caller
 * inspects chunk->result for success/failure.
 */
static void
chunking_worker (gpointer vdata, gpointer user_data)
{
    ChunkingData *data = user_data;
    CDCDescriptor *chunk = vdata;
    int fd = -1;
    ssize_t n;
    int idx;

    chunk->block_buf = g_new0 (char, chunk->len);
    if (!chunk->block_buf) {
        seaf_warning ("Failed to allocate chunk buffer\n");
        /* Fix: mark the failure so the consumer doesn't treat this chunk
         * as successfully written. */
        chunk->result = -1;
        goto out;
    }

    fd = seaf_util_open (data->file_path, O_RDONLY | O_BINARY);
    if (fd < 0) {
        seaf_warning ("Failed to open %s: %s\n", data->file_path, strerror(errno));
        chunk->result = -1;
        goto out;
    }

    if (seaf_util_lseek (fd, chunk->offset, SEEK_SET) == (gint64)-1) {
        seaf_warning ("Failed to lseek %s: %s\n", data->file_path, strerror(errno));
        chunk->result = -1;
        goto out;
    }

    n = readn (fd, chunk->block_buf, chunk->len);
    if (n < 0) {
        seaf_warning ("Failed to read chunk from %s: %s\n",
                      data->file_path, strerror(errno));
        chunk->result = -1;
        goto out;
    }

    chunk->result = seafile_write_chunk (data->repo_id, data->version,
                                         chunk, data->crypt,
                                         chunk->checksum, 1);
    if (chunk->result < 0)
        goto out;

    /* Store the checksum at the chunk's position in the shared array;
     * chunks are fixed-size so the index is offset / block size. */
    idx = chunk->offset / seaf->fixed_block_size;
    memcpy (data->blk_sha1s + idx * CHECKSUM_LENGTH, chunk->checksum, CHECKSUM_LENGTH);

out:
    g_free (chunk->block_buf);
    /* Fix: only close a descriptor we actually opened; the original code
     * called close(-1) on early error paths. */
    if (fd >= 0)
        close (fd);
    g_async_queue_push (data->finished_tasks, chunk);
}
/*
 * Chunk @file_path into fixed-size blocks and write them with a worker
 * thread pool. On success, cdc->blk_sha1s receives the ordered raw block
 * sha1s and cdc->block_nr the block count; ownership of the array passes
 * to @cdc. @indexed, if non-NULL, is advanced as progress feedback.
 * Returns 0 on success, -1 on error.
 *
 * NOTE(review): @indexed is accumulated with "+=", so the caller is
 * expected to have initialized it — confirm at call sites.
 */
static int
split_file_to_block (const char *repo_id,
                     int version,
                     const char *file_path,
                     gint64 file_size,
                     SeafileCrypt *crypt,
                     CDCFileDescriptor *cdc,
                     gboolean write_data,
                     gint64 *indexed)
{
    int n_blocks;
    uint8_t *block_sha1s = NULL;
    GThreadPool *tpool = NULL;
    GAsyncQueue *finished_tasks = NULL;
    GList *pending_tasks = NULL;
    int n_pending = 0;
    CDCDescriptor *chunk;
    int ret = 0;

    /* Round up so a trailing partial block gets its own slot. */
    n_blocks = (file_size + seaf->fixed_block_size - 1) / seaf->fixed_block_size;
    block_sha1s = g_new0 (uint8_t, n_blocks * CHECKSUM_LENGTH);
    if (!block_sha1s) {
        seaf_warning ("Failed to allocate block_sha1s.\n");
        ret = -1;
        goto out;
    }

    finished_tasks = g_async_queue_new ();

    /* Shared context; lives on this stack for the pool's whole lifetime
     * (the pool is drained/freed before we return). */
    ChunkingData data;
    memset (&data, 0, sizeof(data));
    data.repo_id = repo_id;
    data.version = version;
    data.file_path = file_path;
    data.crypt = crypt;
    data.blk_sha1s = block_sha1s;
    data.finished_tasks = finished_tasks;

    tpool = g_thread_pool_new (chunking_worker, &data,
                               seaf->max_indexing_threads, FALSE, NULL);
    if (!tpool) {
        seaf_warning ("Failed to allocate thread pool\n");
        ret = -1;
        goto out;
    }

    /* Enqueue one task per fixed-size chunk. */
    guint64 offset = 0;
    guint64 len;
    guint64 left = (guint64)file_size;
    while (left > 0) {
        len = ((left >= seaf->fixed_block_size) ? seaf->fixed_block_size : left);

        chunk = g_new0 (CDCDescriptor, 1);
        chunk->offset = offset;
        chunk->len = (guint32)len;

        g_thread_pool_push (tpool, chunk, NULL);
        pending_tasks = g_list_prepend (pending_tasks, chunk);
        n_pending++;

        left -= len;
        offset += len;
    }

    /* Collect completions; g_async_queue_pop blocks, so the loop exits
     * via the n_pending counter (or an error). */
    while ((chunk = g_async_queue_pop (finished_tasks)) != NULL) {
        if (chunk->result < 0) {
            ret = -1;
            goto out;
        }
        if (indexed)
            *indexed += seaf->fixed_block_size;

        if ((--n_pending) <= 0) {
            /* Clamp the progress counter to the real size (the last block
             * may be shorter than fixed_block_size). */
            if (indexed)
                *indexed = (guint64)file_size;
            break;
        }
    }

    cdc->block_nr = n_blocks;
    cdc->blk_sha1s = block_sha1s;

out:
    /* immediate=TRUE, wait=TRUE: abort unprocessed tasks and join workers
     * before the stack-allocated ChunkingData goes out of scope. */
    if (tpool)
        g_thread_pool_free (tpool, TRUE, TRUE);
    if (finished_tasks)
        g_async_queue_unref (finished_tasks);
    g_list_free_full (pending_tasks, g_free);
    if (ret < 0)
        g_free (block_sha1s);

    return ret;
}
#endif /* SEAFILE_SERVER */
#define CDC_AVERAGE_BLOCK_SIZE (1 << 23) /* 8MB */
#define CDC_MIN_BLOCK_SIZE (6 * (1 << 20)) /* 6MB */
#define CDC_MAX_BLOCK_SIZE (10 * (1 << 20)) /* 10MB */
/*
 * Index a local file into the object/block stores.
 *
 * Chooses a chunking strategy: content-defined chunking (CDC) when
 * @use_cdc is set or for v0 repos (or in non-full-feature builds), and
 * fixed-size multi-threaded splitting otherwise. On success the raw file
 * object id is written to @sha1 and the file size to @size.
 *
 * @write_data: when FALSE, only checksums are computed, nothing is stored.
 * @indexed: optional progress counter (bytes processed).
 * Returns 0 on success, -1 on error.
 */
int
seaf_fs_manager_index_blocks (SeafFSManager *mgr,
                              const char *repo_id,
                              int version,
                              const char *file_path,
                              unsigned char sha1[],
                              gint64 *size,
                              SeafileCrypt *crypt,
                              gboolean write_data,
                              gboolean use_cdc,
                              gint64 *indexed)
{
    SeafStat sb;
    CDCFileDescriptor cdc;

    if (seaf_stat (file_path, &sb) < 0) {
        seaf_warning ("Bad file %s: %s.\n", file_path, strerror(errno));
        return -1;
    }

    /* Only regular files can be indexed. */
    g_return_val_if_fail (S_ISREG(sb.st_mode), -1);

    if (sb.st_size == 0) {
        /* handle empty file. */
        memset (sha1, 0, 20);
        create_cdc_for_empty_file (&cdc);
    } else {
        memset (&cdc, 0, sizeof(cdc));

#if defined SEAFILE_SERVER && defined FULL_FEATURE
        if (use_cdc || version == 0) {
            /* Content-defined chunking: variable block boundaries. */
            cdc.block_sz = CDC_AVERAGE_BLOCK_SIZE;
            cdc.block_min_sz = CDC_MIN_BLOCK_SIZE;
            cdc.block_max_sz = CDC_MAX_BLOCK_SIZE;
            cdc.write_block = seafile_write_chunk;
            memcpy (cdc.repo_id, repo_id, 36);
            cdc.version = version;
            if (filename_chunk_cdc (file_path, &cdc, crypt, write_data, indexed) < 0) {
                seaf_warning ("Failed to chunk file with CDC.\n");
                return -1;
            }
        } else {
            /* Fixed-size chunking, parallelized across worker threads. */
            memcpy (cdc.repo_id, repo_id, 36);
            cdc.version = version;
            cdc.file_size = sb.st_size;
            if (split_file_to_block (repo_id, version, file_path, sb.st_size,
                                     crypt, &cdc, write_data, indexed) < 0) {
                return -1;
            }
        }
#else
        cdc.block_sz = CDC_AVERAGE_BLOCK_SIZE;
        cdc.block_min_sz = CDC_MIN_BLOCK_SIZE;
        cdc.block_max_sz = CDC_MAX_BLOCK_SIZE;
        cdc.write_block = seafile_write_chunk;
        memcpy (cdc.repo_id, repo_id, 36);
        cdc.version = version;
        if (filename_chunk_cdc (file_path, &cdc, crypt, write_data, indexed) < 0) {
            seaf_warning ("Failed to chunk file with CDC.\n");
            return -1;
        }
#endif

        if (write_data && write_seafile (mgr, repo_id, version, &cdc, sha1) < 0) {
            g_free (cdc.blk_sha1s);
            seaf_warning ("Failed to write seafile for %s.\n", file_path);
            return -1;
        }
    }

    *size = (gint64)sb.st_size;

    /* blk_sha1s is NULL for empty files (cdc was zeroed above). */
    if (cdc.blk_sha1s)
        free (cdc.blk_sha1s);

    return 0;
}
static int
check_and_write_block (const char *repo_id, int version,
const char *path, unsigned char *sha1, const char *block_id)
{
char *content;
gsize len;
GError *error = NULL;
int ret = 0;
if (!g_file_get_contents (path, &content, &len, &error)) {
if (error) {
seaf_warning ("Failed to read %s: %s.\n", path, error->message);
g_clear_error (&error);
return -1;
}
}
SHA_CTX block_ctx;
unsigned char checksum[20];
SHA1_Init (&block_ctx);
SHA1_Update (&block_ctx, content, len);
SHA1_Final (checksum, &block_ctx);
if (memcmp (checksum, sha1, 20) != 0) {
seaf_warning ("Block id %s:%s doesn't match content.\n", repo_id, block_id);
ret = -1;
goto out;
}
if (do_write_chunk (repo_id, version, sha1, content, len) < 0) {
ret = -1;
goto out;
}
out:
g_free (content);
return ret;
}
/*
 * For each (path, block id) pair: verify and store the block, append its
 * raw sha1 to cdc->blk_sha1s, and fold it into the running file checksum
 * (cdc->file_sum). Stops at the first failure.
 *
 * NOTE(review): the loop condition only tests @paths; @blockids must be
 * at least as long as @paths or q->data dereferences NULL — confirm
 * callers guarantee equal lengths.
 */
static int
check_and_write_file_blocks (CDCFileDescriptor *cdc, GList *paths, GList *blockids)
{
    GList *ptr, *q;
    SHA_CTX file_ctx;
    int ret = 0;

    SHA1_Init (&file_ctx);
    for (ptr = paths, q = blockids; ptr; ptr = ptr->next, q = q->next) {
        char *path = ptr->data;
        char *blk_id = q->data;
        unsigned char sha1[20];

        hex_to_rawdata (blk_id, sha1, 20);

        ret = check_and_write_block (cdc->repo_id, cdc->version, path, sha1, blk_id);
        if (ret < 0)
            goto out;

        /* Record the block id in arrival order; the array was sized by
         * the caller via init_file_cdc(). */
        memcpy (cdc->blk_sha1s + cdc->block_nr * CHECKSUM_LENGTH,
                sha1, CHECKSUM_LENGTH);
        cdc->block_nr++;

        SHA1_Update (&file_ctx, sha1, 20);
    }
    SHA1_Final (cdc->file_sum, &file_ctx);

out:
    return ret;
}
/*
 * Like check_and_write_file_blocks(), but for blocks that should already
 * exist in block storage: verify existence only, record each raw sha1 in
 * cdc->blk_sha1s, and compute cdc->file_sum. Returns -1 as soon as any
 * block is missing.
 */
static int
check_existed_file_blocks (CDCFileDescriptor *cdc, GList *blockids)
{
    GList *q;
    SHA_CTX file_ctx;
    int ret = 0;

    SHA1_Init (&file_ctx);
    for (q = blockids; q; q = q->next) {
        char *blk_id = q->data;
        unsigned char sha1[20];

        if (!seaf_block_manager_block_exists (
                seaf->block_mgr, cdc->repo_id, cdc->version, blk_id)) {
            ret = -1;
            goto out;
        }

        hex_to_rawdata (blk_id, sha1, 20);
        memcpy (cdc->blk_sha1s + cdc->block_nr * CHECKSUM_LENGTH,
                sha1, CHECKSUM_LENGTH);
        cdc->block_nr++;

        SHA1_Update (&file_ctx, sha1, 20);
    }
    SHA1_Final (cdc->file_sum, &file_ctx);

out:
    return ret;
}
/*
 * Zero-initialize @cdc for a file of @file_size bytes made of @block_nr
 * blocks, allocating the raw block-sha1 array. The array is freed by the
 * caller. Returns 0 on success, -1 if allocation fails.
 */
static int
init_file_cdc (CDCFileDescriptor *cdc,
               const char *repo_id, int version,
               int block_nr, gint64 file_size)
{
    memset (cdc, 0, sizeof(*cdc));

    /* One CHECKSUM_LENGTH-byte slot per block, zero-filled. */
    cdc->blk_sha1s = (uint8_t *)calloc (block_nr * CHECKSUM_LENGTH,
                                        sizeof(uint8_t));
    if (cdc->blk_sha1s == NULL) {
        seaf_warning ("Failed to alloc block sha1 array.\n");
        return -1;
    }

    cdc->file_size = file_size;
    memcpy (cdc->repo_id, repo_id, 36);
    cdc->version = version;

    return 0;
}
/*
 * Build a seafile object from pre-chunked local block files: verify and
 * store each block, then write the file object. On success the raw file
 * object id is written to @sha1 (all zeros for an empty file).
 * @paths and @blockids are parallel lists. Returns 0 on success, -1 on error.
 */
int
seaf_fs_manager_index_file_blocks (SeafFSManager *mgr,
                                   const char *repo_id,
                                   int version,
                                   GList *paths,
                                   GList *blockids,
                                   unsigned char sha1[],
                                   gint64 file_size)
{
    int ret = 0;
    CDCFileDescriptor cdc;

    if (!paths) {
        /* handle empty file. */
        memset (sha1, 0, 20);
        create_cdc_for_empty_file (&cdc);
    } else {
        int block_nr = g_list_length (paths);

        /* init_file_cdc zeroes cdc first, so blk_sha1s is well defined
         * on every path that reaches the out label. */
        if (init_file_cdc (&cdc, repo_id, version, block_nr, file_size) < 0) {
            ret = -1;
            goto out;
        }

        if (check_and_write_file_blocks (&cdc, paths, blockids) < 0) {
            seaf_warning ("Failed to check and write file blocks.\n");
            ret = -1;
            goto out;
        }

        if (write_seafile (mgr, repo_id, version, &cdc, sha1) < 0) {
            seaf_warning ("Failed to write seafile.\n");
            ret = -1;
            goto out;
        }
    }

out:
    if (cdc.blk_sha1s)
        free (cdc.blk_sha1s);

    return ret;
}
/*
 * Verify and store a list of raw blocks, without creating a file object.
 * @paths and @blockids are parallel lists of local file paths and their
 * expected hex block ids. Stops at the first failing block.
 * Returns 0 on success, -1 on error (including empty @paths).
 */
int
seaf_fs_manager_index_raw_blocks (SeafFSManager *mgr,
                                  const char *repo_id,
                                  int version,
                                  GList *paths,
                                  GList *blockids)
{
    GList *p, *b;
    int status = 0;

    if (paths == NULL)
        return -1;

    for (p = paths, b = blockids; p != NULL; p = p->next, b = b->next) {
        char *block_path = p->data;
        char *block_hex_id = b->data;
        unsigned char raw_id[20];

        hex_to_rawdata (block_hex_id, raw_id, 20);

        status = check_and_write_block (repo_id, version,
                                        block_path, raw_id, block_hex_id);
        if (status < 0)
            break;
    }

    return status;
}
/*
 * Build a seafile object from blocks that are already present in block
 * storage (no block data is written). On success the raw file object id
 * is written to @sha1 (all zeros for an empty file).
 * Returns 0 on success, -1 if any block is missing or storage fails.
 */
int
seaf_fs_manager_index_existed_file_blocks (SeafFSManager *mgr,
                                           const char *repo_id,
                                           int version,
                                           GList *blockids,
                                           unsigned char sha1[],
                                           gint64 file_size)
{
    int ret = 0;
    CDCFileDescriptor cdc;

    int block_nr = g_list_length (blockids);
    if (block_nr == 0) {
        /* handle empty file. */
        memset (sha1, 0, 20);
        create_cdc_for_empty_file (&cdc);
    } else {
        /* init_file_cdc zeroes cdc first, so blk_sha1s is well defined
         * on every path that reaches the out label. */
        if (init_file_cdc (&cdc, repo_id, version, block_nr, file_size) < 0) {
            ret = -1;
            goto out;
        }

        if (check_existed_file_blocks (&cdc, blockids) < 0) {
            seaf_warning ("Failed to check and write file blocks.\n");
            ret = -1;
            goto out;
        }

        if (write_seafile (mgr, repo_id, version, &cdc, sha1) < 0) {
            seaf_warning ("Failed to write seafile.\n");
            ret = -1;
            goto out;
        }
    }

out:
    if (cdc.blk_sha1s)
        free (cdc.blk_sha1s);

    return ret;
}
/* Take a reference on @seafile.
 * NOTE(review): plain increment, not atomic — confirm all ref/unref
 * happens on one thread. */
void
seafile_ref (Seafile *seafile)
{
    ++seafile->ref_count;
}
/* Destroy a Seafile object: free each hex block-id string, the id array,
 * then the object. Only called from seafile_unref() when the refcount
 * drops to zero. */
static void
seafile_free (Seafile *seafile)
{
    int i;

    if (seafile->blk_sha1s) {
        for (i = 0; i < seafile->n_blocks; ++i)
            g_free (seafile->blk_sha1s[i]);
        g_free (seafile->blk_sha1s);
    }

    g_free (seafile);
}
/* Drop a reference on @seafile (NULL-safe); frees the object when the
 * count reaches zero. */
void
seafile_unref (Seafile *seafile)
{
    if (!seafile)
        return;

    if (--seafile->ref_count <= 0)
        seafile_free (seafile);
}
/*
 * Deserialize a version-0 (legacy binary) seafile object: a SeafileOndisk
 * header followed by raw 20-byte block ids. @id is the 40-char hex object
 * id. Returns a new Seafile with ref_count 1, or NULL on corrupt data.
 *
 * NOTE(review): @len is signed and compared against an unsigned sizeof;
 * callers are expected to pass a non-negative length.
 */
static Seafile *
seafile_from_v0_data (const char *id, const void *data, int len)
{
    const SeafileOndisk *ondisk = data;
    Seafile *seafile;
    int id_list_len, n_blocks;

    if (len < sizeof(SeafileOndisk)) {
        seaf_warning ("[fs mgr] Corrupt seafile object %s.\n", id);
        return NULL;
    }

    if (ntohl(ondisk->type) != SEAF_METADATA_TYPE_FILE) {
        seaf_warning ("[fd mgr] %s is not a file.\n", id);
        return NULL;
    }

    /* The trailing bytes must be a whole number of 20-byte block ids. */
    id_list_len = len - sizeof(SeafileOndisk);
    if (id_list_len % 20 != 0) {
        seaf_warning ("[fs mgr] Corrupt seafile object %s.\n", id);
        return NULL;
    }
    n_blocks = id_list_len / 20;

    seafile = g_new0 (Seafile, 1);

    seafile->object.type = SEAF_METADATA_TYPE_FILE;
    seafile->version = 0;
    /* Copies the terminating NUL too (id is 40 hex chars + NUL). */
    memcpy (seafile->file_id, id, 41);
    seafile->file_size = ntoh64 (ondisk->file_size);
    seafile->n_blocks = n_blocks;

    /* Convert each raw id to its 40-char hex string form. */
    seafile->blk_sha1s = g_new0 (char*, seafile->n_blocks);
    const unsigned char *blk_sha1_ptr = ondisk->block_ids;
    int i;
    for (i = 0; i < seafile->n_blocks; ++i) {
        char *blk_sha1 = g_new0 (char, 41);
        seafile->blk_sha1s[i] = blk_sha1;
        rawdata_to_hex (blk_sha1_ptr, blk_sha1, 20);
        blk_sha1_ptr += 20;
    }

    seafile->ref_count = 1;
    return seafile;
}
/*
 * Build a Seafile from a parsed JSON object (repo version > 0 format).
 * Validates type, version and block id list; returns a new Seafile with
 * ref_count 1, or NULL on any validation failure.
 */
static Seafile *
seafile_from_json_object (const char *id, json_t *object)
{
    json_t *block_id_array = NULL;
    int type;
    int version;
    guint64 file_size;
    Seafile *seafile = NULL;

    /* Sanity checks. */

    type = json_object_get_int_member (object, "type");
    if (type != SEAF_METADATA_TYPE_FILE) {
        seaf_debug ("Object %s is not a file.\n", id);
        return NULL;
    }

    version = (int) json_object_get_int_member (object, "version");
    if (version < 1) {
        seaf_debug ("Seafile object %s version should be > 0, version is %d.\n",
                    id, version);
        return NULL;
    }

    file_size = (guint64) json_object_get_int_member (object, "size");

    block_id_array = json_object_get (object, "block_ids");
    if (!block_id_array) {
        seaf_debug ("No block id array in seafile object %s.\n", id);
        return NULL;
    }

    seafile = g_new0 (Seafile, 1);
    seafile->object.type = SEAF_METADATA_TYPE_FILE;
    /* 40 hex chars; g_new0 zero-filled the struct so the id stays
     * NUL-terminated. */
    memcpy (seafile->file_id, id, 40);
    seafile->version = version;
    seafile->file_size = file_size;
    seafile->n_blocks = json_array_size (block_id_array);

    seafile->blk_sha1s = g_new0 (char *, seafile->n_blocks);
    int i;
    json_t *block_id_obj;
    const char *block_id;
    for (i = 0; i < seafile->n_blocks; ++i) {
        block_id_obj = json_array_get (block_id_array, i);
        block_id = json_string_value (block_id_obj);
        /* Reject non-string or malformed ids; seafile_free handles the
         * partially filled id array. */
        if (!block_id || !is_object_id_valid(block_id)) {
            seafile_free (seafile);
            return NULL;
        }
        seafile->blk_sha1s[i] = g_strdup(block_id);
    }

    seafile->ref_count = 1;
    return seafile;
}
/*
 * Deserialize a compressed JSON seafile object: inflate @data, parse the
 * JSON, and build the in-memory Seafile. Returns NULL on decompression,
 * parse, or validation failure.
 */
static Seafile *
seafile_from_json (const char *id, void *data, int len)
{
    guint8 *inflated = NULL;
    int inflated_len = 0;
    json_t *root;
    json_error_t jerror;
    Seafile *file;

    /* Stored version > 0 objects are compressed on disk. */
    if (seaf_decompress (data, len, &inflated, &inflated_len) < 0) {
        seaf_warning ("Failed to decompress seafile object %s.\n", id);
        return NULL;
    }

    root = json_loadb ((const char *)inflated, inflated_len, 0, &jerror);
    g_free (inflated);

    if (root == NULL) {
        if (jerror.text)
            seaf_warning ("Failed to load seafile json object: %s.\n", jerror.text);
        else
            seaf_warning ("Failed to load seafile json object.\n");
        return NULL;
    }

    file = seafile_from_json_object (id, root);

    json_decref (root);
    return file;
}
/*
 * Dispatch deserialization by on-disk format: JSON for repo version > 0,
 * the legacy binary layout otherwise.
 */
static Seafile *
seafile_from_data (const char *id, void *data, int len, gboolean is_json)
{
    return is_json ? seafile_from_json (id, data, len)
                   : seafile_from_v0_data (id, data, len);
}
/*
 * Load the seafile object @file_id from the object store.
 * The empty-file id (EMPTY_SHA1) is special-cased and returns a synthetic
 * zero-block Seafile without hitting storage. Returns a referenced
 * Seafile (release with seafile_unref), or NULL on error.
 */
Seafile *
seaf_fs_manager_get_seafile (SeafFSManager *mgr,
                             const char *repo_id,
                             int version,
                             const char *file_id)
{
    void *data;
    int len;
    Seafile *seafile;

#if 0
    seafile = g_hash_table_lookup (mgr->priv->seafile_cache, file_id);
    if (seafile) {
        seafile_ref (seafile);
        return seafile;
    }
#endif

    /* Empty file: synthesize the object instead of reading storage. */
    if (memcmp (file_id, EMPTY_SHA1, 40) == 0) {
        seafile = g_new0 (Seafile, 1);
        memset (seafile->file_id, '0', 40);
        seafile->ref_count = 1;
        return seafile;
    }

    if (seaf_obj_store_read_obj (mgr->obj_store, repo_id, version,
                                 file_id, &data, &len) < 0) {
        seaf_warning ("[fs mgr] Failed to read file %s.\n", file_id);
        return NULL;
    }

    /* May return NULL on corrupt data; passed through to the caller. */
    seafile = seafile_from_data (file_id, data, len, (version > 0));
    g_free (data);

#if 0
    /*
     * Add to cache. Also increase ref count.
     */
    seafile_ref (seafile);
    g_hash_table_insert (mgr->priv->seafile_cache, g_strdup(file_id), seafile);
#endif

    return seafile;
}
/*
 * Serialize @file into the version-0 binary layout (SeafileOndisk header
 * plus raw 20-byte block ids). *len receives the buffer size; the caller
 * owns the returned buffer.
 */
static guint8 *
seafile_to_v0_data (Seafile *file, int *len)
{
    SeafileOndisk *ondisk;

    *len = sizeof(SeafileOndisk) + file->n_blocks * 20;
    ondisk = (SeafileOndisk *)g_new0 (char, *len);

    /* Header fields are stored big-endian. */
    ondisk->type = htonl(SEAF_METADATA_TYPE_FILE);
    ondisk->file_size = hton64 (file->file_size);

    /* Convert each hex block id back to its raw 20-byte form. */
    guint8 *ptr = ondisk->block_ids;
    int i;
    for (i = 0; i < file->n_blocks; ++i) {
        hex_to_rawdata (file->blk_sha1s[i], ptr, 20);
        ptr += 20;
    }

    return (guint8 *)ondisk;
}
/*
 * Serialize @file as canonical (key-sorted) JSON. Side effect: overwrites
 * file->file_id with the sha1 of the serialization, since the object id
 * is defined as the hash of the JSON. The returned buffer comes from
 * json_dumps(); callers in this file release it with g_free().
 * NOTE(review): that relies on g_free being free-compatible — confirm
 * GLib allocator configuration.
 */
static guint8 *
seafile_to_json (Seafile *file, int *len)
{
    json_t *object, *block_id_array;

    object = json_object ();

    json_object_set_int_member (object, "type", SEAF_METADATA_TYPE_FILE);
    json_object_set_int_member (object, "version", file->version);
    json_object_set_int_member (object, "size", file->file_size);

    block_id_array = json_array ();
    int i;
    for (i = 0; i < file->n_blocks; ++i) {
        json_array_append_new (block_id_array, json_string(file->blk_sha1s[i]));
    }
    json_object_set_new (object, "block_ids", block_id_array);

    char *data = json_dumps (object, JSON_SORT_KEYS);
    *len = strlen(data);

    /* The object id is the sha1 of the canonical JSON. */
    unsigned char sha1[20];
    calculate_sha1 (sha1, data, *len);
    rawdata_to_hex (sha1, file->file_id, 20);

    json_decref (object);
    return (guint8 *)data;
}
/*
 * Serialize @file for storage: v0 objects use the legacy binary layout;
 * newer versions are serialized to JSON and compressed. *len receives the
 * final size; the caller owns the returned buffer. Returns NULL on error.
 */
static guint8 *
seafile_to_data (Seafile *file, int *len)
{
    if (file->version <= 0)
        return seafile_to_v0_data (file, len);

    guint8 *json_buf;
    int json_len;
    guint8 *packed;

    /* Note: seafile_to_json also refreshes file->file_id. */
    json_buf = seafile_to_json (file, &json_len);
    if (json_buf == NULL)
        return NULL;

    if (seaf_compress (json_buf, json_len, &packed, len) < 0) {
        seaf_warning ("Failed to compress file object %s.\n", file->file_id);
        g_free (json_buf);
        return NULL;
    }

    g_free (json_buf);
    return packed;
}
/*
 * Persist @file into the object store under file->file_id.
 * A no-op when the object already exists (content-addressed dedup).
 * Returns 0 on success, -1 on serialization or write failure.
 */
int
seafile_save (SeafFSManager *fs_mgr,
              const char *repo_id,
              int version,
              Seafile *file)
{
    guint8 *payload;
    int payload_len;
    int rc = 0;

    if (seaf_obj_store_obj_exists (fs_mgr->obj_store, repo_id, version,
                                   file->file_id))
        return 0;

    payload = seafile_to_data (file, &payload_len);
    if (payload == NULL)
        return -1;

    if (seaf_obj_store_write_obj (fs_mgr->obj_store, repo_id, version,
                                  file->file_id, payload, payload_len,
                                  FALSE) < 0)
        rc = -1;

    g_free (payload);
    return rc;
}
/*
 * Compute the version-0 directory id: sha1 over each entry's hex id,
 * name, and little-endian mode, in list order. Writes the 40-char hex id
 * into dir->dir_id. Empty dirs get the all-'0' id (EMPTY_SHA1).
 */
static void compute_dir_id_v0 (SeafDir *dir, GList *entries)
{
    SHA_CTX ctx;
    GList *p;
    uint8_t sha1[20];
    SeafDirent *dent;
    guint32 mode_le;

    /* ID for empty dirs is EMPTY_SHA1. */
    if (entries == NULL) {
        memset (dir->dir_id, '0', 40);
        return;
    }

    SHA1_Init (&ctx);
    for (p = entries; p; p = p->next) {
        dent = (SeafDirent *)p->data;
        SHA1_Update (&ctx, dent->id, 40);
        SHA1_Update (&ctx, dent->name, dent->name_len);
        /* Convert mode to little endian before compute. */
        if (G_BYTE_ORDER == G_BIG_ENDIAN)
            mode_le = GUINT32_SWAP_LE_BE (dent->mode);
        else
            mode_le = dent->mode;
        SHA1_Update (&ctx, &mode_le, sizeof(mode_le));
    }
    SHA1_Final (sha1, &ctx);

    rawdata_to_hex (sha1, dir->dir_id, 20);
}
/*
 * Create a SeafDir taking ownership of @entries.
 * If @id is given it is used directly; otherwise for v0 the id is
 * computed here, and for version > 0 it is produced while serializing
 * (seaf_dir_to_data). Empty dirs always get EMPTY_SHA1 — NOTE(review):
 * this overwrites an explicitly passed @id for empty dirs; presumably
 * they are always equal, confirm.
 */
SeafDir *
seaf_dir_new (const char *id, GList *entries, int version)
{
    SeafDir *dir;

    dir = g_new0(SeafDir, 1);

    dir->version = version;
    if (id != NULL) {
        memcpy(dir->dir_id, id, 40);
        dir->dir_id[40] = '\0';
    } else if (version == 0) {
        compute_dir_id_v0 (dir, entries);
    }

    dir->entries = entries;

    if (dir->entries != NULL)
        /* Serializing also fills in dir_id for version > 0. */
        dir->ondisk = seaf_dir_to_data (dir, &dir->ondisk_size);
    else
        memcpy (dir->dir_id, EMPTY_SHA1, 40);

    return dir;
}
/*
 * Release a SeafDir (NULL-safe): every dirent, the entry list, the
 * cached on-disk serialization, and the struct itself.
 */
void
seaf_dir_free (SeafDir *dir)
{
    if (dir == NULL)
        return;

    /* Frees each SeafDirent and the list cells in one pass. */
    g_list_free_full (dir->entries, (GDestroyNotify)seaf_dirent_free);

    g_free (dir->ondisk);
    g_free (dir);
}
/*
 * Create a directory entry. @sha1 is the 40-char hex object id of the
 * entry's target. mtime/modifier/size are only meaningful for version > 0
 * (modifier and size only for regular files). Caller frees with
 * seaf_dirent_free().
 */
SeafDirent *
seaf_dirent_new (int version, const char *sha1, int mode, const char *name,
                 gint64 mtime, const char *modifier, gint64 size)
{
    SeafDirent *dent;

    dent = g_new0 (SeafDirent, 1);
    dent->version = version;
    memcpy(dent->id, sha1, 40);
    dent->id[40] = '\0';
    /* Mode for files must have 0644 set. To prevent the caller from forgetting,
     * we set the bits here.
     */
    if (S_ISREG(mode))
        dent->mode = (mode | 0644);
    else
        dent->mode = mode;
    dent->name = g_strdup(name);
    dent->name_len = strlen(name);

    if (version > 0) {
        dent->mtime = mtime;
        if (S_ISREG(mode)) {
            dent->modifier = g_strdup(modifier);
            dent->size = size;
        }
    }

    return dent;
}
/* Release a SeafDirent and its owned strings (NULL-safe; modifier may be
 * NULL for non-regular entries — g_free handles that). */
void
seaf_dirent_free (SeafDirent *dent)
{
    if (!dent)
        return;
    g_free (dent->name);
    g_free (dent->modifier);
    g_free (dent);
}
/*
 * Deep-copy a dirent: shallow struct copy via g_memdup, then duplicate
 * the owned strings so the copy can be freed independently.
 * NOTE(review): g_memdup is deprecated in newer GLib (g_memdup2) because
 * of its guint size parameter; fine here for a fixed small struct.
 */
SeafDirent *
seaf_dirent_dup (SeafDirent *dent)
{
    SeafDirent *new_dent;

    new_dent = g_memdup (dent, sizeof(SeafDirent));
    new_dent->name = g_strdup(dent->name);
    new_dent->modifier = g_strdup(dent->modifier);

    return new_dent;
}
/*
 * Deserialize a version-0 (legacy binary) directory object:
 * a 32-bit type tag followed by packed dirents (mode, 40-char id,
 * name length, name). Returns a new SeafDir, or NULL on corrupt data.
 */
static SeafDir *
seaf_dir_from_v0_data (const char *dir_id, const uint8_t *data, int len)
{
    SeafDir *root;
    SeafDirent *dent;
    const uint8_t *ptr;
    int remain;
    int dirent_base_size;
    guint32 meta_type;
    guint32 name_len;

    ptr = data;
    remain = len;

    meta_type = get32bit (&ptr);
    remain -= 4;
    if (meta_type != SEAF_METADATA_TYPE_DIR) {
        seaf_warning ("Data does not contain a directory.\n");
        return NULL;
    }

    root = g_new0(SeafDir, 1);
    root->object.type = SEAF_METADATA_TYPE_DIR;
    root->version = 0;
    memcpy(root->dir_id, dir_id, 40);
    root->dir_id[40] = '\0';

    /* Fixed part of one packed dirent: mode + name_len + 40-char id. */
    dirent_base_size = 2 * sizeof(guint32) + 40;
    while (remain > dirent_base_size) {
        dent = g_new0(SeafDirent, 1);

        dent->version = 0;
        dent->mode = get32bit (&ptr);
        memcpy (dent->id, ptr, 40);
        dent->id[40] = '\0';
        ptr += 40;
        name_len = get32bit (&ptr);
        remain -= dirent_base_size;
        if (remain >= name_len) {
            /* Names are capped at SEAF_DIR_NAME_LEN-1; longer names are
             * truncated but the read pointer only advances by what was
             * copied. */
            dent->name_len = MIN (name_len, SEAF_DIR_NAME_LEN - 1);
            dent->name = g_strndup((const char *)ptr, dent->name_len);
            ptr += dent->name_len;
            remain -= dent->name_len;
        } else {
            seaf_warning ("Bad data format for dir objcet %s.\n", dir_id);
            g_free (dent);
            goto bad;
        }

        root->entries = g_list_prepend (root->entries, dent);
    }

    /* Entries were prepended; restore on-disk order. */
    root->entries = g_list_reverse (root->entries);

    return root;

bad:
    seaf_dir_free (root);
    return NULL;
}
/*
 * Parse one dirent from a JSON dir object (version > 0 format).
 * Validates id, name, and (for regular files) modifier. Returns a new
 * SeafDirent, or NULL on any missing/invalid field.
 */
static SeafDirent *
parse_dirent (const char *dir_id, int version, json_t *object)
{
    guint32 mode;
    const char *id;
    const char *name;
    gint64 mtime;
    const char *modifier;
    gint64 size;

    mode = (guint32) json_object_get_int_member (object, "mode");

    id = json_object_get_string_member (object, "id");
    if (!id) {
        seaf_debug ("Dirent id not set for dir object %s.\n", dir_id);
        return NULL;
    }
    if (!is_object_id_valid (id)) {
        seaf_debug ("Dirent id is invalid for dir object %s.\n", dir_id);
        return NULL;
    }

    name = json_object_get_string_member (object, "name");
    if (!name) {
        seaf_debug ("Dirent name not set for dir object %s.\n", dir_id);
        return NULL;
    }

    mtime = json_object_get_int_member (object, "mtime");

    /* modifier/size are only present (and only read below) for regular
     * files, so they're never used uninitialized. */
    if (S_ISREG(mode)) {
        modifier = json_object_get_string_member (object, "modifier");
        if (!modifier) {
            seaf_debug ("Dirent modifier not set for dir object %s.\n", dir_id);
            return NULL;
        }
        size = json_object_get_int_member (object, "size");
    }

    SeafDirent *dirent = g_new0 (SeafDirent, 1);
    dirent->version = version;
    dirent->mode = mode;
    /* 40 hex chars; the g_new0-zeroed struct keeps it NUL-terminated. */
    memcpy (dirent->id, id, 40);
    dirent->name_len = strlen(name);
    dirent->name = g_strdup(name);
    dirent->mtime = mtime;
    if (S_ISREG(mode)) {
        dirent->modifier = g_strdup(modifier);
        dirent->size = size;
    }

    return dirent;
}
/*
 * Build a SeafDir from a parsed JSON object (version > 0 format).
 * Validates type and version, then parses every entry in "dirents";
 * fails wholesale if any entry is invalid. Returns NULL on failure.
 */
static SeafDir *
seaf_dir_from_json_object (const char *dir_id, json_t *object)
{
    json_t *dirent_array = NULL;
    int type;
    int version;
    SeafDir *dir = NULL;

    /* Sanity checks. */

    type = json_object_get_int_member (object, "type");
    if (type != SEAF_METADATA_TYPE_DIR) {
        seaf_debug ("Object %s is not a dir.\n", dir_id);
        return NULL;
    }

    version = (int) json_object_get_int_member (object, "version");
    if (version < 1) {
        seaf_debug ("Dir object %s version should be > 0, version is %d.\n",
                    dir_id, version);
        return NULL;
    }

    dirent_array = json_object_get (object, "dirents");
    if (!dirent_array) {
        seaf_debug ("No dirents in dir object %s.\n", dir_id);
        return NULL;
    }

    dir = g_new0 (SeafDir, 1);
    dir->object.type = SEAF_METADATA_TYPE_DIR;
    /* 40 hex chars; g_new0 zero-fill keeps dir_id NUL-terminated. */
    memcpy (dir->dir_id, dir_id, 40);
    dir->version = version;

    size_t n_dirents = json_array_size (dirent_array);
    int i;
    json_t *dirent_obj;
    SeafDirent *dirent;
    for (i = 0; i < n_dirents; ++i) {
        dirent_obj = json_array_get (dirent_array, i);
        dirent = parse_dirent (dir_id, version, dirent_obj);
        if (!dirent) {
            /* seaf_dir_free releases the entries parsed so far. */
            seaf_dir_free (dir);
            return NULL;
        }
        dir->entries = g_list_prepend (dir->entries, dirent);
    }
    /* Entries were prepended; restore the serialized order. */
    dir->entries = g_list_reverse (dir->entries);

    return dir;
}
/*
 * Deserialize a compressed JSON dir object: inflate @data, parse the
 * JSON, and build the in-memory SeafDir. Returns NULL on decompression,
 * parse, or validation failure.
 */
static SeafDir *
seaf_dir_from_json (const char *dir_id, uint8_t *data, int len)
{
    guint8 *inflated = NULL;
    int inflated_len = 0;
    json_t *root;
    json_error_t jerror;
    SeafDir *result;

    /* Stored version > 0 objects are compressed on disk. */
    if (seaf_decompress (data, len, &inflated, &inflated_len) < 0) {
        seaf_warning ("Failed to decompress dir object %s.\n", dir_id);
        return NULL;
    }

    root = json_loadb ((const char *)inflated, inflated_len, 0, &jerror);
    g_free (inflated);

    if (root == NULL) {
        if (jerror.text)
            seaf_warning ("Failed to load seafdir json object: %s.\n", jerror.text);
        else
            seaf_warning ("Failed to load seafdir json object.\n");
        return NULL;
    }

    result = seaf_dir_from_json_object (dir_id, root);

    json_decref (root);
    return result;
}
/*
 * Deserialize a dir object from its raw stored bytes.
 *
 * @is_json selects the format: compressed JSON (version > 0) or the
 * legacy v0 binary layout.
 */
SeafDir *
seaf_dir_from_data (const char *dir_id, uint8_t *data, int len,
                    gboolean is_json)
{
    return is_json ? seaf_dir_from_json (dir_id, data, len)
                   : seaf_dir_from_v0_data (dir_id, data, len);
}
/* Packed size of one v0 dirent record: the fixed DirentOndisk header
 * plus the name bytes (stored without a terminating NUL). */
inline static int
ondisk_dirent_size (SeafDirent *dirent)
{
    int size = sizeof(DirentOndisk);
    size += dirent->name_len;
    return size;
}
/*
 * Serialize @dir into the legacy v0 binary layout.
 *
 * The buffer is a SeafdirOndisk header followed by densely packed
 * DirentOndisk records; integer fields are stored big-endian (htonl).
 * v0 dirents carry only mode, id and name -- no mtime/modifier/size.
 * Returns a newly allocated buffer; *len receives its total size.
 */
static void *
seaf_dir_to_v0_data (SeafDir *dir, int *len)
{
    SeafdirOndisk *ondisk;
    int dir_ondisk_size = sizeof(SeafdirOndisk);
    GList *dirents = dir->entries;
    GList *ptr;
    SeafDirent *de;
    char *p;
    DirentOndisk *de_ondisk;

    /* First pass: compute the total buffer size. */
    for (ptr = dirents; ptr; ptr = ptr->next) {
        de = ptr->data;
        dir_ondisk_size += ondisk_dirent_size (de);
    }

    *len = dir_ondisk_size;
    ondisk = (SeafdirOndisk *) g_new0 (char, dir_ondisk_size);

    ondisk->type = htonl (SEAF_METADATA_TYPE_DIR);

    /* Second pass: pack each dirent right after the previous one. */
    p = ondisk->dirents;
    for (ptr = dirents; ptr; ptr = ptr->next) {
        de = ptr->data;
        de_ondisk = (DirentOndisk *) p;

        de_ondisk->mode = htonl(de->mode);
        memcpy (de_ondisk->id, de->id, 40);
        de_ondisk->name_len = htonl (de->name_len);
        /* Name is stored without a terminating NUL. */
        memcpy (de_ondisk->name, de->name, de->name_len);

        p += ondisk_dirent_size (de);
    }

    return (void *)ondisk;
}
/* Append a JSON representation of @dirent to @array.
 * Ownership of the new json object is transferred to the array. */
static void
add_to_dirent_array (json_t *array, SeafDirent *dirent)
{
    json_t *entry = json_object ();

    json_object_set_int_member (entry, "mode", dirent->mode);
    json_object_set_string_member (entry, "id", dirent->id);
    json_object_set_string_member (entry, "name", dirent->name);
    json_object_set_int_member (entry, "mtime", dirent->mtime);

    /* Only regular files carry a modifier and a size. */
    if (S_ISREG(dirent->mode)) {
        json_object_set_string_member (entry, "modifier", dirent->modifier);
        json_object_set_int_member (entry, "size", dirent->size);
    }

    json_array_append_new (array, entry);
}
/*
 * Serialize @dir to its JSON text form (version > 0 format).
 *
 * Side effect: recomputes dir->dir_id as the SHA-1 of the serialized
 * text, since fs objects are content-addressed.  Returns a newly
 * allocated string (caller frees); *len receives strlen of the data.
 * Returns NULL on serialization failure.
 */
static void *
seaf_dir_to_json (SeafDir *dir, int *len)
{
    json_t *object, *dirent_array;
    GList *ptr;
    SeafDirent *dirent;

    object = json_object ();

    json_object_set_int_member (object, "type", SEAF_METADATA_TYPE_DIR);
    json_object_set_int_member (object, "version", dir->version);

    dirent_array = json_array ();
    for (ptr = dir->entries; ptr; ptr = ptr->next) {
        dirent = ptr->data;
        add_to_dirent_array (dirent_array, dirent);
    }
    json_object_set_new (object, "dirents", dirent_array);

    /* Keys are sorted so the serialization -- and hence the object id --
     * is deterministic. */
    char *data = json_dumps (object, JSON_SORT_KEYS);
    json_decref (object);
    /* Fix: json_dumps can fail (e.g. OOM or non-serializable value); the
     * old code passed the NULL result straight to strlen -- UB. */
    if (!data) {
        seaf_warning ("Failed to serialize dir object to json.\n");
        return NULL;
    }
    *len = strlen(data);

    /* The dir object id is sha1 hash of the json object. */
    unsigned char sha1[20];
    calculate_sha1 (sha1, data, *len);
    rawdata_to_hex (sha1, dir->dir_id, 20);

    return data;
}
/*
 * Serialize @dir to its on-disk byte representation.
 *
 * Version 0 dirs use the legacy binary layout; newer versions are
 * serialized to JSON and then compressed.  Returns a newly allocated
 * buffer (caller frees) or NULL on failure; *len receives its size.
 */
void *
seaf_dir_to_data (SeafDir *dir, int *len)
{
    if (dir->version <= 0)
        return seaf_dir_to_v0_data (dir, len);

    guint8 *json_data;
    int json_len;
    guint8 *compressed;

    json_data = seaf_dir_to_json (dir, &json_len);
    if (!json_data)
        return NULL;

    if (seaf_compress (json_data, json_len, &compressed, len) < 0) {
        seaf_warning ("Failed to compress dir object %s.\n", dir->dir_id);
        g_free (json_data);
        return NULL;
    }

    g_free (json_data);
    return compressed;
}
/*
 * Write @dir's on-disk representation to the object store.
 * Returns 0 on success (including the no-op cases), -1 on write error.
 */
int
seaf_dir_save (SeafFSManager *fs_mgr,
               const char *repo_id,
               int version,
               SeafDir *dir)
{
    /* The empty dir is implicit; it is never stored. */
    if (memcmp (dir->dir_id, EMPTY_SHA1, 40) == 0)
        return 0;

    /* Content-addressed store: an existing object is already correct. */
    if (seaf_obj_store_obj_exists (fs_mgr->obj_store, repo_id, version, dir->dir_id))
        return 0;

    if (seaf_obj_store_write_obj (fs_mgr->obj_store, repo_id, version, dir->dir_id,
                                  dir->ondisk, dir->ondisk_size, FALSE) < 0)
        return -1;

    return 0;
}
/*
 * Load and parse dir object @dir_id from the object store.
 *
 * The empty dir (EMPTY_SHA1) is synthesized in memory without touching
 * the store.  Returns NULL on read/parse failure; on success the caller
 * owns the returned SeafDir.
 */
SeafDir *
seaf_fs_manager_get_seafdir (SeafFSManager *mgr,
                             const char *repo_id,
                             int version,
                             const char *dir_id)
{
    void *data;
    int len;
    SeafDir *dir;

    /* TODO: add hash cache */

    if (memcmp (dir_id, EMPTY_SHA1, 40) == 0) {
        dir = g_new0 (SeafDir, 1);
        dir->version = version;
        /* 40 '0' characters; the trailing NUL comes from g_new0. */
        memset (dir->dir_id, '0', 40);
        return dir;
    }

    if (seaf_obj_store_read_obj (mgr->obj_store, repo_id, version,
                                 dir_id, &data, &len) < 0) {
        seaf_warning ("[fs mgr] Failed to read dir %s.\n", dir_id);
        return NULL;
    }

    /* version > 0 objects are compressed JSON; version 0 is binary. */
    dir = seaf_dir_from_data (dir_id, data, len, (version > 0));
    g_free (data);

    return dir;
}
/* GCompareFunc for SeafDirent lists.
 * Note the operands are swapped (b vs a): entries sort in DESCENDING
 * name order, the order is_dirents_sorted() checks for. */
static gint
compare_dirents (gconstpointer a, gconstpointer b)
{
    const SeafDirent *denta = a, *dentb = b;

    return strcmp (dentb->name, denta->name);
}
/* Return TRUE iff @dirents is in descending name order (the order
 * produced by compare_dirents). */
static gboolean
is_dirents_sorted (GList *dirents)
{
    GList *cur;

    /* Compare each adjacent pair. */
    for (cur = dirents; cur != NULL && cur->next != NULL; cur = cur->next) {
        SeafDirent *first = cur->data;
        SeafDirent *second = cur->next->data;

        if (strcmp (first->name, second->name) < 0)
            return FALSE;
    }

    return TRUE;
}
/*
 * Like seaf_fs_manager_get_seafdir, but guarantees the entries are
 * sorted (descending name order, see compare_dirents).
 */
SeafDir *
seaf_fs_manager_get_seafdir_sorted (SeafFSManager *mgr,
                                    const char *repo_id,
                                    int version,
                                    const char *dir_id)
{
    SeafDir *dir;

    dir = seaf_fs_manager_get_seafdir(mgr, repo_id, version, dir_id);
    if (!dir)
        return NULL;

    /* Version > 0 dir objects are always stored sorted; only some very
     * old v0 objects may need sorting. */
    if (version <= 0 && !is_dirents_sorted (dir->entries))
        dir->entries = g_list_sort (dir->entries, compare_dirents);

    return dir;
}
/*
 * Resolve @path under @root_id and return the dir with its entries
 * guaranteed sorted (descending name order, see compare_dirents).
 */
SeafDir *
seaf_fs_manager_get_seafdir_sorted_by_path (SeafFSManager *mgr,
                                            const char *repo_id,
                                            int version,
                                            const char *root_id,
                                            const char *path)
{
    SeafDir *dir;

    dir = seaf_fs_manager_get_seafdir_by_path (mgr, repo_id,
                                               version, root_id,
                                               path, NULL);
    if (!dir)
        return NULL;

    /* Version > 0 dir objects are always stored sorted; only some very
     * old v0 objects may need sorting. */
    if (version <= 0 && !is_dirents_sorted (dir->entries))
        dir->entries = g_list_sort (dir->entries, compare_dirents);

    return dir;
}
/*
 * Read the leading 32-bit type tag from a v0 binary fs object.
 * Returns SEAF_METADATA_TYPE_INVALID when @data is too short.
 */
static int
parse_metadata_type_v0 (const uint8_t *data, int len)
{
    const uint8_t *ptr = data;

    /* Fix: compare against a signed size.  The original compared the
     * signed int @len with the unsigned sizeof, so a negative length was
     * promoted to a huge unsigned value, passed the check, and caused an
     * out-of-bounds read in get32bit. */
    if (len < (int)sizeof(guint32))
        return SEAF_METADATA_TYPE_INVALID;

    return (int)(get32bit(&ptr));
}
static int
parse_metadata_type_json (const char *obj_id, uint8_t *data, int len)
{
guint8 *decompressed;
int outlen;
json_t *object;
json_error_t error;
int type;
if (seaf_decompress (data, len, &decompressed, &outlen) < 0) {
seaf_warning ("Failed to decompress fs object %s.\n", obj_id);
return SEAF_METADATA_TYPE_INVALID;
}
object = json_loadb ((const char *)decompressed, outlen, 0, &error);
g_free (decompressed);
if (!object) {
if (error.text)
seaf_warning ("Failed to load fs json object: %s.\n", error.text);
else
seaf_warning ("Failed to load fs json object.\n");
return SEAF_METADATA_TYPE_INVALID;
}
type = json_object_get_int_member (object, "type");
json_decref (object);
return type;
}
/*
 * Determine the fs object type of raw object data.
 * @is_json selects between the JSON (version > 0) and v0 binary parsers.
 */
int
seaf_metadata_type_from_data (const char *obj_id,
                              uint8_t *data, int len, gboolean is_json)
{
    return is_json ? parse_metadata_type_json (obj_id, data, len)
                   : parse_metadata_type_v0 (data, len);
}
/*
 * Parse a v0 binary fs object of unknown type, dispatching on its
 * leading type tag.  Returns NULL (after logging) for unknown types.
 */
SeafFSObject *
fs_object_from_v0_data (const char *obj_id, const uint8_t *data, int len)
{
    int type = parse_metadata_type_v0 (data, len);

    switch (type) {
    case SEAF_METADATA_TYPE_FILE:
        return (SeafFSObject *)seafile_from_v0_data (obj_id, data, len);
    case SEAF_METADATA_TYPE_DIR:
        return (SeafFSObject *)seaf_dir_from_v0_data (obj_id, data, len);
    default:
        seaf_warning ("Invalid object type %d.\n", type);
        return NULL;
    }
}
/*
 * Parse a compressed-JSON fs object of unknown type.
 *
 * Decompresses and parses the JSON, then dispatches on its "type"
 * member to the seafile or seafdir constructor.  Returns NULL (after
 * logging) on decompression, parse or type errors.
 */
SeafFSObject *
fs_object_from_json (const char *obj_id, uint8_t *data, int len)
{
    guint8 *decompressed;
    int outlen;
    json_t *object;
    json_error_t error;
    int type;
    SeafFSObject *fs_obj;

    if (seaf_decompress (data, len, &decompressed, &outlen) < 0) {
        seaf_warning ("Failed to decompress fs object %s.\n", obj_id);
        return NULL;
    }

    object = json_loadb ((const char *)decompressed, outlen, 0, &error);
    g_free (decompressed);
    if (!object) {
        if (error.text)
            seaf_warning ("Failed to load fs json object: %s.\n", error.text);
        else
            seaf_warning ("Failed to load fs json object.\n");
        return NULL;
    }

    type = json_object_get_int_member (object, "type");
    if (type == SEAF_METADATA_TYPE_FILE)
        fs_obj = (SeafFSObject *)seafile_from_json_object (obj_id, object);
    else if (type == SEAF_METADATA_TYPE_DIR)
        fs_obj = (SeafFSObject *)seaf_dir_from_json_object (obj_id, object);
    else {
        seaf_warning ("Invalid fs type %d.\n", type);
        json_decref (object);
        return NULL;
    }

    json_decref (object);

    return fs_obj;
}
/*
 * Parse an fs object without knowing its type in advance.
 * @is_json selects between the JSON (version > 0) and v0 binary formats.
 */
SeafFSObject *
seaf_fs_object_from_data (const char *obj_id,
                          uint8_t *data, int len,
                          gboolean is_json)
{
    return is_json ? fs_object_from_json (obj_id, data, len)
                   : fs_object_from_v0_data (obj_id, data, len);
}
/* Release an fs object through the destructor matching its type.
 * NULL and unrecognized types are ignored. */
void
seaf_fs_object_free (SeafFSObject *obj)
{
    if (obj == NULL)
        return;

    switch (obj->type) {
    case SEAF_METADATA_TYPE_FILE:
        seafile_unref ((Seafile *)obj);
        break;
    case SEAF_METADATA_TYPE_DIR:
        seaf_dir_free ((SeafDir *)obj);
        break;
    default:
        break;
    }
}
/* Allocate an empty BlockList.
 * The hash table owns its keys; the ptr array owns separate copies. */
BlockList *
block_list_new ()
{
    BlockList *list = g_new0 (BlockList, 1);

    list->block_hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);
    list->block_ids = g_ptr_array_new_with_free_func (g_free);

    return list;
}
/* Free @bl, its hash table (and owned keys) and its id array. */
void
block_list_free (BlockList *bl)
{
    if (bl->block_hash != NULL)
        g_hash_table_destroy (bl->block_hash);

    /* TRUE: also free the owned id strings. */
    g_ptr_array_free (bl->block_ids, TRUE);

    g_free (bl);
}
/* Add @block_id to @bl if not already present.
 * The hash table stores one owned copy (key == value; freed by the
 * table's key-destroy func), the ptr array a second owned copy. */
void
block_list_insert (BlockList *bl, const char *block_id)
{
    if (g_hash_table_lookup (bl->block_hash, block_id))
        return;

    char *key = g_strdup(block_id);
    g_hash_table_replace (bl->block_hash, key, key);
    g_ptr_array_add (bl->block_ids, g_strdup(block_id));
    ++bl->n_blocks;
}
/*
 * Return a new BlockList containing the ids that are in @bl1 but not
 * in @bl2.  The caller owns the result.
 */
BlockList *
block_list_difference (BlockList *bl1, BlockList *bl2)
{
    BlockList *bl;
    int i;
    char *block_id;

    bl = block_list_new ();

    for (i = 0; i < bl1->block_ids->len; ++i) {
        block_id = g_ptr_array_index (bl1->block_ids, i);
        /* Fix/DRY: reuse block_list_insert instead of duplicating its
         * bookkeeping inline.  This also deduplicates the result even if
         * the input list ever contains repeated ids. */
        if (g_hash_table_lookup (bl2->block_hash, block_id) == NULL)
            block_list_insert (bl, block_id);
    }

    return bl;
}
/*
 * Visit a single file object with @callback.
 * The empty file (EMPTY_SHA1) is skipped.  Returns -1 only when the
 * callback fails and @skip_errors is FALSE.
 */
static int
traverse_file (SeafFSManager *mgr,
               const char *repo_id,
               int version,
               const char *id,
               TraverseFSTreeCallback callback,
               void *user_data,
               gboolean skip_errors)
{
    gboolean stop = FALSE;
    gboolean ok;

    if (memcmp (id, EMPTY_SHA1, 40) == 0)
        return 0;

    ok = callback (mgr, repo_id, version, id,
                   SEAF_METADATA_TYPE_FILE, user_data, &stop);
    if (!ok && !skip_errors)
        return -1;

    return 0;
}
/*
 * Pre-order traversal of dir object @id.
 *
 * The callback sees the dir itself first and may set *stop to prune
 * this subtree.  With @skip_errors, callback failures and unreadable
 * subdirs are logged/ignored instead of aborting the walk.
 * Returns 0 on success (or skipped errors), -1 otherwise.
 */
static int
traverse_dir (SeafFSManager *mgr,
              const char *repo_id,
              int version,
              const char *id,
              TraverseFSTreeCallback callback,
              void *user_data,
              gboolean skip_errors)
{
    SeafDir *dir;
    GList *p;
    SeafDirent *seaf_dent;
    gboolean stop = FALSE;

    if (!callback (mgr, repo_id, version,
                   id, SEAF_METADATA_TYPE_DIR, user_data, &stop) &&
        !skip_errors)
        return -1;

    /* Callback asked to prune this subtree. */
    if (stop)
        return 0;

    dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, id);
    if (!dir) {
        seaf_warning ("[fs-mgr]get seafdir %s failed\n", id);
        if (skip_errors)
            return 0;
        return -1;
    }

    for (p = dir->entries; p; p = p->next) {
        seaf_dent = (SeafDirent *)p->data;

        if (S_ISREG(seaf_dent->mode)) {
            if (traverse_file (mgr, repo_id, version, seaf_dent->id,
                               callback, user_data, skip_errors) < 0) {
                if (!skip_errors) {
                    seaf_dir_free (dir);
                    return -1;
                }
            }
        } else if (S_ISDIR(seaf_dent->mode)) {
            if (traverse_dir (mgr, repo_id, version, seaf_dent->id,
                              callback, user_data, skip_errors) < 0) {
                if (!skip_errors) {
                    seaf_dir_free (dir);
                    return -1;
                }
            }
        }
        /* Entries that are neither regular files nor dirs are skipped. */
    }

    seaf_dir_free (dir);
    return 0;
}
/*
 * Walk the whole fs tree rooted at @root_id, calling @callback on every
 * dir and file object.  The empty root is a successful no-op.
 */
int
seaf_fs_manager_traverse_tree (SeafFSManager *mgr,
                               const char *repo_id,
                               int version,
                               const char *root_id,
                               TraverseFSTreeCallback callback,
                               void *user_data,
                               gboolean skip_errors)
{
    if (strcmp (root_id, EMPTY_SHA1) == 0)
        return 0;

    return traverse_dir (mgr, repo_id, version, root_id,
                         callback, user_data, skip_errors);
}
/*
 * Recursively walk the subtree rooted at @dent, invoking @callback with
 * the full path of every entry (the root dir itself first).
 *
 * The callback may set *stop to prune a subtree.  Unlike traverse_dir,
 * any callback failure aborts the walk with -1.
 */
static int
traverse_dir_path (SeafFSManager *mgr,
                   const char *repo_id,
                   int version,
                   const char *dir_path,
                   SeafDirent *dent,
                   TraverseFSPathCallback callback,
                   void *user_data)
{
    SeafDir *dir;
    GList *p;
    SeafDirent *seaf_dent;
    gboolean stop = FALSE;
    char *sub_path;
    int ret = 0;

    if (!callback (mgr, dir_path, dent, user_data, &stop))
        return -1;

    /* Callback asked to prune this subtree. */
    if (stop)
        return 0;

    dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, dent->id);
    if (!dir) {
        seaf_warning ("get seafdir %s:%s failed\n", repo_id, dent->id);
        return -1;
    }

    for (p = dir->entries; p; p = p->next) {
        seaf_dent = (SeafDirent *)p->data;
        sub_path = g_strconcat (dir_path, "/", seaf_dent->name, NULL);

        if (S_ISREG(seaf_dent->mode)) {
            /* Files are leaves: invoke the callback directly. */
            if (!callback (mgr, sub_path, seaf_dent, user_data, &stop)) {
                g_free (sub_path);
                ret = -1;
                break;
            }
        } else if (S_ISDIR(seaf_dent->mode)) {
            if (traverse_dir_path (mgr, repo_id, version, sub_path, seaf_dent,
                                   callback, user_data) < 0) {
                g_free (sub_path);
                ret = -1;
                break;
            }
        }

        g_free (sub_path);
    }

    seaf_dir_free (dir);
    return ret;
}
/*
 * Resolve @dir_path under @root_id and walk the subtree it names,
 * invoking @callback for every entry with its full path.
 * Returns 0 on success, -1 when the path cannot be resolved or the
 * traversal fails.
 */
int
seaf_fs_manager_traverse_path (SeafFSManager *mgr,
                               const char *repo_id,
                               int version,
                               const char *root_id,
                               const char *dir_path,
                               TraverseFSPathCallback callback,
                               void *user_data)
{
    SeafDirent *dent;
    int rc;

    dent = seaf_fs_manager_get_dirent_by_path (mgr, repo_id, version,
                                               root_id, dir_path, NULL);
    if (dent == NULL) {
        seaf_warning ("Failed to get dirent for %.8s:%s.\n", repo_id, dir_path);
        return -1;
    }

    rc = traverse_dir_path (mgr, repo_id, version, dir_path, dent,
                            callback, user_data);

    seaf_dirent_free (dent);
    return rc;
}
/*
 * TraverseFSTreeCallback that collects the block ids of every file
 * object into the BlockList passed as @user_data.
 */
static gboolean
fill_blocklist (SeafFSManager *mgr,
                const char *repo_id, int version,
                const char *obj_id, int type,
                void *user_data, gboolean *stop)
{
    BlockList *bl = user_data;
    Seafile *file;
    int i;

    /* Only file objects carry block ids; dirs are ignored. */
    if (type != SEAF_METADATA_TYPE_FILE)
        return TRUE;

    file = seaf_fs_manager_get_seafile (mgr, repo_id, version, obj_id);
    if (!file) {
        seaf_warning ("[fs mgr] Failed to find file %s.\n", obj_id);
        return FALSE;
    }

    for (i = 0; i < file->n_blocks; ++i)
        block_list_insert (bl, file->blk_sha1s[i]);

    seafile_unref (file);
    return TRUE;
}
/*
 * Collect the block ids of every file under @root_id into @bl.
 * Errors are not skipped: any unreadable object aborts with -1.
 */
int
seaf_fs_manager_populate_blocklist (SeafFSManager *mgr,
                                    const char *repo_id,
                                    int version,
                                    const char *root_id,
                                    BlockList *bl)
{
    return seaf_fs_manager_traverse_tree (mgr, repo_id, version, root_id,
                                          fill_blocklist,
                                          bl, FALSE);
}
/* Return TRUE iff fs object @id exists.
 * The empty file/dir is implicit: never stored but always present. */
gboolean
seaf_fs_manager_object_exists (SeafFSManager *mgr,
                               const char *repo_id,
                               int version,
                               const char *id)
{
    gboolean is_empty_obj = (memcmp (id, EMPTY_SHA1, 40) == 0);

    if (is_empty_obj)
        return TRUE;

    return seaf_obj_store_obj_exists (mgr->obj_store, repo_id, version, id);
}
/* Delete fs object @id from the backing store.
 * Thin wrapper; deletion errors are not reported. */
void
seaf_fs_manager_delete_object (SeafFSManager *mgr,
                               const char *repo_id,
                               int version,
                               const char *id)
{
    struct SeafObjStore *store = mgr->obj_store;

    seaf_obj_store_delete_obj (store, repo_id, version, id);
}
/*
 * Return the size in bytes of file object @file_id, or -1 if the file
 * object cannot be loaded.
 */
gint64
seaf_fs_manager_get_file_size (SeafFSManager *mgr,
                               const char *repo_id,
                               int version,
                               const char *file_id)
{
    Seafile *file;
    gint64 file_size;

    /* Fix: use the @mgr argument instead of the global seaf->fs_mgr, so
     * the function honors whichever manager the caller passed in. */
    file = seaf_fs_manager_get_seafile (mgr, repo_id, version, file_id);
    if (!file) {
        seaf_warning ("Couldn't get file %s:%s\n", repo_id, file_id);
        return -1;
    }

    file_size = file->file_size;

    seafile_unref (file);
    return file_size;
}
/*
 * Recursively compute the total byte size of all files under dir @id.
 *
 * For version > 0 the size comes straight from each dirent; for v0 it
 * requires loading each file object.  Returns -1 on any load error.
 */
static gint64
get_dir_size (SeafFSManager *mgr, const char *repo_id, int version, const char *id)
{
    SeafDir *dir;
    SeafDirent *seaf_dent;
    guint64 size = 0;
    gint64 result;
    GList *p;

    dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, id);
    if (!dir)
        return -1;

    for (p = dir->entries; p; p = p->next) {
        seaf_dent = (SeafDirent *)p->data;

        if (S_ISREG(seaf_dent->mode)) {
            if (dir->version > 0)
                /* Newer dirents carry the file size directly. */
                result = seaf_dent->size;
            else {
                /* v0 dirents don't: load the file object for its size. */
                result = seaf_fs_manager_get_file_size (mgr,
                                                        repo_id,
                                                        version,
                                                        seaf_dent->id);
                if (result < 0) {
                    seaf_dir_free (dir);
                    return result;
                }
            }
            size += result;
        } else if (S_ISDIR(seaf_dent->mode)) {
            result = get_dir_size (mgr, repo_id, version, seaf_dent->id);
            if (result < 0) {
                seaf_dir_free (dir);
                return result;
            }
            size += result;
        }
    }

    seaf_dir_free (dir);
    return size;
}
/* Total size in bytes of all files under @root_id; 0 for the empty
 * root, -1 on error. */
gint64
seaf_fs_manager_get_fs_size (SeafFSManager *mgr,
                             const char *repo_id,
                             int version,
                             const char *root_id)
{
    gboolean is_empty_root = (strcmp (root_id, EMPTY_SHA1) == 0);

    if (is_empty_root)
        return 0;

    return get_dir_size (mgr, repo_id, version, root_id);
}
/*
 * Recursively count the regular files under dir @id.
 * Returns the count, or -1 when a dir object cannot be loaded.
 */
static int
count_dir_files (SeafFSManager *mgr, const char *repo_id, int version, const char *id)
{
    SeafDir *dir;
    GList *p;
    int total = 0;

    dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, id);
    if (dir == NULL)
        return -1;

    for (p = dir->entries; p != NULL; p = p->next) {
        SeafDirent *dent = (SeafDirent *)p->data;

        if (S_ISREG(dent->mode)) {
            total++;
        } else if (S_ISDIR(dent->mode)) {
            int sub = count_dir_files (mgr, repo_id, version, dent->id);
            if (sub < 0) {
                seaf_dir_free (dir);
                return sub;
            }
            total += sub;
        }
    }

    seaf_dir_free (dir);
    return total;
}
/*
 * Recursively accumulate the number of dirs and files, and the total
 * file size, under dir @id into the caller-provided counters.
 *
 * Sizes are only accumulated for version > 0, where dirents carry a
 * size field.  Returns 0 on success, -1 on error.
 */
static int
get_file_count_info (SeafFSManager *mgr,
                     const char *repo_id,
                     int version,
                     const char *id,
                     gint64 *dir_count,
                     gint64 *file_count,
                     gint64 *size)
{
    SeafDir *dir;
    SeafDirent *seaf_dent;
    GList *p;
    int ret = 0;

    dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, id);
    if (!dir)
        return -1;

    for (p = dir->entries; p; p = p->next) {
        seaf_dent = (SeafDirent *)p->data;

        if (S_ISREG(seaf_dent->mode)) {
            (*file_count)++;
            if (version > 0)
                (*size) += seaf_dent->size;
        } else if (S_ISDIR(seaf_dent->mode)) {
            (*dir_count)++;
            ret = get_file_count_info (mgr, repo_id, version, seaf_dent->id,
                                       dir_count, file_count, size);
            /* Fix: stop at the first failure.  Previously the loop kept
             * going and a later successful recursion overwrote ret,
             * silently losing the error. */
            if (ret < 0)
                break;
        }
    }

    seaf_dir_free (dir);
    return ret;
}
/* Number of regular files under @root_id; 0 for the empty root, -1 on
 * error. */
int
seaf_fs_manager_count_fs_files (SeafFSManager *mgr,
                                const char *repo_id,
                                int version,
                                const char *root_id)
{
    gboolean is_empty_root = (strcmp (root_id, EMPTY_SHA1) == 0);

    if (is_empty_root)
        return 0;

    return count_dir_files (mgr, repo_id, version, root_id);
}
/*
 * Walk @path, component by component, from dir @root_id and return the
 * SeafDir it names.
 *
 * Only directory entries are matched, so a path component that names a
 * file fails with SEAF_ERR_PATH_NO_EXIST.  Returns NULL with @error set
 * on a missing dir object or nonexistent path; on success the caller
 * owns the returned SeafDir.
 */
SeafDir *
seaf_fs_manager_get_seafdir_by_path (SeafFSManager *mgr,
                                     const char *repo_id,
                                     int version,
                                     const char *root_id,
                                     const char *path,
                                     GError **error)
{
    SeafDir *dir;
    SeafDirent *dent;
    const char *dir_id = root_id;
    char *name, *saveptr;
    /* strtok_r mutates its input, so work on a copy of @path. */
    char *tmp_path = g_strdup(path);

    dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, dir_id);
    if (!dir) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_DIR_MISSING, "directory is missing");
        g_free (tmp_path);
        return NULL;
    }

    name = strtok_r (tmp_path, "/", &saveptr);
    while (name != NULL) {
        GList *l;
        /* Find the subdirectory entry matching this path component. */
        for (l = dir->entries; l != NULL; l = l->next) {
            dent = l->data;

            if (strcmp(dent->name, name) == 0 && S_ISDIR(dent->mode)) {
                dir_id = dent->id;
                break;
            }
        }

        if (!l) {
            /* No matching subdir entry. */
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_PATH_NO_EXIST,
                         "Path does not exists %s", path);
            seaf_dir_free (dir);
            dir = NULL;
            break;
        }

        /* dir_id points into the current dir's dirent, so load the next
         * level before freeing it. */
        SeafDir *prev = dir;
        dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, dir_id);
        seaf_dir_free (prev);

        if (!dir) {
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_DIR_MISSING,
                         "directory is missing");
            break;
        }

        name = strtok_r (NULL, "/", &saveptr);
    }

    g_free (tmp_path);
    return dir;
}
/*
 * Resolve @path (relative to dir @root_id) to the object id it names.
 *
 * On success returns a newly allocated 40-char hex id and, when @mode
 * is non-NULL, stores the entry's mode bits.  "/" resolves to @root_id
 * itself with a directory mode.  Returns NULL (with @error set on most
 * failure paths) when the path does not exist or a dir object cannot
 * be loaded.
 */
char *
seaf_fs_manager_path_to_obj_id (SeafFSManager *mgr,
                                const char *repo_id,
                                int version,
                                const char *root_id,
                                const char *path,
                                guint32 *mode,
                                GError **error)
{
    char *copy = g_strdup (path);
    int off = strlen(copy) - 1;
    char *slash, *name;
    SeafDir *base_dir = NULL;
    SeafDirent *dent;
    GList *p;
    char *obj_id = NULL;

    /* Strip trailing slashes. */
    while (off >= 0 && copy[off] == '/')
        copy[off--] = 0;

    if (strlen(copy) == 0) {
        /* the path is root "/" */
        if (mode) {
            *mode = S_IFDIR;
        }
        obj_id = g_strdup(root_id);
        goto out;
    }

    /* Split into parent path and final component. */
    slash = strrchr (copy, '/');
    if (!slash) {
        /* Top-level name: look it up directly in the root dir. */
        base_dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, root_id);
        if (!base_dir) {
            seaf_warning ("Failed to find root dir %s.\n", root_id);
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, " ");
            goto out;
        }
        name = copy;
    } else {
        *slash = 0;
        name = slash + 1;
        GError *tmp_error = NULL;
        base_dir = seaf_fs_manager_get_seafdir_by_path (mgr,
                                                        repo_id,
                                                        version,
                                                        root_id,
                                                        copy,
                                                        &tmp_error);
        /* PATH_NO_EXIST is expected (path absent in this commit); any
         * other error is propagated as a real failure. */
        if (tmp_error &&
            !g_error_matches(tmp_error,
                             SEAFILE_DOMAIN,
                             SEAF_ERR_PATH_NO_EXIST)) {
            seaf_warning ("Failed to get dir for %s.\n", copy);
            g_propagate_error (error, tmp_error);
            goto out;
        }

        /* The path doesn't exist in this commit. */
        if (!base_dir) {
            g_propagate_error (error, tmp_error);
            goto out;
        }
    }

    /* Look up the final component in the parent dir. */
    for (p = base_dir->entries; p != NULL; p = p->next) {
        dent = p->data;

        /* Skip dirents whose object id is corrupt. */
        if (!is_object_id_valid (dent->id))
            continue;

        if (strcmp (dent->name, name) == 0) {
            obj_id = g_strdup (dent->id);
            if (mode) {
                *mode = dent->mode;
            }
            break;
        }
    }

out:
    if (base_dir)
        seaf_dir_free (base_dir);
    g_free (copy);
    return obj_id;
}
/*
 * Resolve @path to a FILE object id.
 * Returns NULL (freeing any intermediate result) when the path does
 * not exist or names a directory.  Caller frees the returned id.
 */
char *
seaf_fs_manager_get_seafile_id_by_path (SeafFSManager *mgr,
                                        const char *repo_id,
                                        int version,
                                        const char *root_id,
                                        const char *path,
                                        GError **error)
{
    guint32 mode;
    char *obj_id;

    obj_id = seaf_fs_manager_path_to_obj_id (mgr, repo_id, version,
                                             root_id, path, &mode, error);
    if (obj_id == NULL)
        return NULL;

    /* The path resolved to a directory, not a file. */
    if (S_ISDIR(mode)) {
        g_free (obj_id);
        return NULL;
    }

    return obj_id;
}
/*
 * Resolve @path to a DIR object id.
 * Returns NULL (freeing any intermediate result) when the path does
 * not exist or names a file.  Caller frees the returned id.
 */
char *
seaf_fs_manager_get_seafdir_id_by_path (SeafFSManager *mgr,
                                        const char *repo_id,
                                        int version,
                                        const char *root_id,
                                        const char *path,
                                        GError **error)
{
    guint32 mode = 0;
    char *obj_id;

    obj_id = seaf_fs_manager_path_to_obj_id (mgr, repo_id, version,
                                             root_id, path, &mode, error);
    if (obj_id == NULL)
        return NULL;

    /* The path resolved to something that is not a directory. */
    if (!S_ISDIR(mode)) {
        g_free (obj_id);
        return NULL;
    }

    return obj_id;
}
/*
 * Return a copy of the dirent that @path names, or NULL with @error
 * set when the parent dir cannot be loaded (a missing final component
 * returns NULL without setting @error).
 *
 * The parent dir is resolved first; a parent of "." (i.e. a top-level
 * name) means looking directly in @root_id.  Caller frees the result
 * with seaf_dirent_free().
 */
SeafDirent *
seaf_fs_manager_get_dirent_by_path (SeafFSManager *mgr,
                                    const char *repo_id,
                                    int version,
                                    const char *root_id,
                                    const char *path,
                                    GError **error)
{
    SeafDirent *dent = NULL;
    SeafDir *dir = NULL;
    char *parent_dir = NULL;
    char *file_name = NULL;

    parent_dir  = g_path_get_dirname(path);
    file_name = g_path_get_basename(path);

    if (strcmp (parent_dir, ".") == 0) {
        dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, root_id);
        if (!dir) {
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_DIR_MISSING, "directory is missing");
        }
    } else
        dir = seaf_fs_manager_get_seafdir_by_path (mgr, repo_id, version,
                                                   root_id, parent_dir, error);

    if (!dir) {
        goto out;
    }

    /* Find the entry matching the final path component. */
    GList *p;
    for (p = dir->entries; p; p = p->next) {
        SeafDirent *d = p->data;
        if (strcmp (d->name, file_name) == 0) {
            dent = seaf_dirent_dup(d);
            break;
        }
    }

out:
    if (dir)
        seaf_dir_free (dir);
    g_free (parent_dir);
    g_free (file_name);

    return dent;
}
/*
 * Validate a v0 binary dir object.
 *
 * Checks the structural layout and, when @verify_id is set, recomputes
 * the object id as SHA-1 over each dirent's id, name and (little-endian)
 * mode and compares it against @dir_id.
 */
static gboolean
verify_seafdir_v0 (const char *dir_id, const uint8_t *data, int len,
                   gboolean verify_id)
{
    guint32 meta_type;
    guint32 mode;
    char id[41];
    guint32 name_len;
    char name[SEAF_DIR_NAME_LEN];
    const uint8_t *ptr;
    int remain;
    int dirent_base_size;
    SHA_CTX ctx;
    uint8_t sha1[20];
    char check_id[41];

    if (len < sizeof(SeafdirOndisk)) {
        seaf_warning ("[fs mgr] Corrupt seafdir object %s.\n", dir_id);
        return FALSE;
    }

    ptr = data;
    remain = len;

    meta_type = get32bit (&ptr);
    remain -= 4;
    if (meta_type != SEAF_METADATA_TYPE_DIR) {
        seaf_warning ("Data does not contain a directory.\n");
        return FALSE;
    }

    if (verify_id)
        SHA1_Init (&ctx);

    /* Fixed part of a packed dirent: mode + name_len fields + 40-byte id. */
    dirent_base_size = 2 * sizeof(guint32) + 40;
    while (remain > dirent_base_size) {
        mode = get32bit (&ptr);
        memcpy (id, ptr, 40);
        id[40] = '\0';
        ptr += 40;
        name_len = get32bit (&ptr);
        remain -= dirent_base_size;
        if (remain >= name_len) {
            /* Clamp to the local buffer; the hash below then covers the
             * possibly-truncated name. */
            name_len = MIN (name_len, SEAF_DIR_NAME_LEN - 1);
            memcpy (name, ptr, name_len);
            ptr += name_len;
            remain -= name_len;
        } else {
            seaf_warning ("Bad data format for dir objcet %s.\n", dir_id);
            return FALSE;
        }

        if (verify_id) {
            /* Convert mode to little endian before compute. */
            if (G_BYTE_ORDER == G_BIG_ENDIAN)
                mode = GUINT32_SWAP_LE_BE (mode);

            SHA1_Update (&ctx, id, 40);
            SHA1_Update (&ctx, name, name_len);
            SHA1_Update (&ctx, &mode, sizeof(mode));
        }
    }

    if (!verify_id)
        return TRUE;

    SHA1_Final (sha1, &ctx);
    rawdata_to_hex (sha1, check_id, 20);
    if (strcmp (check_id, dir_id) == 0)
        return TRUE;
    else
        return FALSE;
}
/*
 * Verify a compressed-JSON fs object: its id must equal the SHA-1 of
 * the uncompressed content.
 */
static gboolean
verify_fs_object_json (const char *obj_id, uint8_t *data, int len)
{
    guint8 *plain = NULL;
    int plain_len = 0;
    unsigned char digest[20];
    char digest_hex[41];

    if (seaf_decompress (data, len, &plain, &plain_len) < 0) {
        seaf_warning ("Failed to decompress fs object %s.\n", obj_id);
        return FALSE;
    }

    calculate_sha1 (digest, (const char *)plain, plain_len);
    rawdata_to_hex (digest, digest_hex, 20);
    g_free (plain);

    return strcmp (digest_hex, obj_id) == 0;
}
/* Verify a dir object, dispatching on its storage format.
 * @verify_id only applies to the v0 path; the JSON check always
 * recomputes the id. */
static gboolean
verify_seafdir (const char *dir_id, uint8_t *data, int len,
                gboolean verify_id, gboolean is_json)
{
    return is_json ? verify_fs_object_json (dir_id, data, len)
                   : verify_seafdir_v0 (dir_id, data, len, verify_id);
}
/*
 * Read dir object @dir_id from the store and verify it.
 * Sets *io_error when the object cannot be read.  The empty dir is
 * always valid.
 */
gboolean
seaf_fs_manager_verify_seafdir (SeafFSManager *mgr,
                                const char *repo_id,
                                int version,
                                const char *dir_id,
                                gboolean verify_id,
                                gboolean *io_error)
{
    void *data = NULL;
    int len = 0;
    gboolean ok;

    if (memcmp (dir_id, EMPTY_SHA1, 40) == 0) {
        return TRUE;
    }

    if (seaf_obj_store_read_obj (mgr->obj_store, repo_id, version,
                                 dir_id, &data, &len) < 0) {
        seaf_warning ("[fs mgr] Failed to read dir %s:%s.\n", repo_id, dir_id);
        *io_error = TRUE;
        return FALSE;
    }

    ok = verify_seafdir (dir_id, data, len, verify_id, (version > 0));
    g_free (data);

    return ok;
}
/*
 * Validate a v0 binary file object.
 *
 * Checks the type tag and that the trailing block-id list is a whole
 * number of 20-byte ids; when @verify_id is set, also recomputes the
 * object id as SHA-1 over the block-id list and compares it to @id.
 */
static gboolean
verify_seafile_v0 (const char *id, const void *data, int len, gboolean verify_id)
{
    const SeafileOndisk *ondisk = data;
    SHA_CTX ctx;
    uint8_t sha1[20];
    char check_id[41];

    if (len < sizeof(SeafileOndisk)) {
        seaf_warning ("[fs mgr] Corrupt seafile object %s.\n", id);
        return FALSE;
    }

    if (ntohl(ondisk->type) != SEAF_METADATA_TYPE_FILE) {
        seaf_warning ("[fd mgr] %s is not a file.\n", id);
        return FALSE;
    }

    /* The payload after the header must be whole 20-byte block ids. */
    int id_list_length = len - sizeof(SeafileOndisk);
    if (id_list_length % 20 != 0) {
        seaf_warning ("[fs mgr] Bad seafile id list length %d.\n", id_list_length);
        return FALSE;
    }

    if (!verify_id)
        return TRUE;

    SHA1_Init (&ctx);
    SHA1_Update (&ctx, ondisk->block_ids, len - sizeof(SeafileOndisk));
    SHA1_Final (sha1, &ctx);

    rawdata_to_hex (sha1, check_id, 20);
    if (strcmp (check_id, id) == 0)
        return TRUE;
    else
        return FALSE;
}
/* Verify a file object, dispatching on its storage format.
 * @verify_id only applies to the v0 path; the JSON check always
 * recomputes the id. */
static gboolean
verify_seafile (const char *id, void *data, int len,
                gboolean verify_id, gboolean is_json)
{
    return is_json ? verify_fs_object_json (id, data, len)
                   : verify_seafile_v0 (id, data, len, verify_id);
}
/*
 * Read file object @file_id from the store and verify it.
 * Sets *io_error when the object cannot be read.  The empty file is
 * always valid.
 */
gboolean
seaf_fs_manager_verify_seafile (SeafFSManager *mgr,
                                const char *repo_id,
                                int version,
                                const char *file_id,
                                gboolean verify_id,
                                gboolean *io_error)
{
    void *data = NULL;
    int len = 0;
    gboolean ok;

    if (memcmp (file_id, EMPTY_SHA1, 40) == 0) {
        return TRUE;
    }

    if (seaf_obj_store_read_obj (mgr->obj_store, repo_id, version,
                                 file_id, &data, &len) < 0) {
        seaf_warning ("[fs mgr] Failed to read file %s:%s.\n", repo_id, file_id);
        *io_error = TRUE;
        return FALSE;
    }

    ok = verify_seafile (file_id, data, len, verify_id, (version > 0));
    g_free (data);

    return ok;
}
/*
 * Verify a v0 binary fs object of unknown type by dispatching on its
 * leading type tag.  Unknown types fail verification.
 */
static gboolean
verify_fs_object_v0 (const char *obj_id,
                     uint8_t *data,
                     int len,
                     gboolean verify_id)
{
    int type = seaf_metadata_type_from_data (obj_id, data, len, FALSE);

    if (type == SEAF_METADATA_TYPE_FILE)
        return verify_seafile_v0 (obj_id, data, len, verify_id);
    if (type == SEAF_METADATA_TYPE_DIR)
        return verify_seafdir_v0 (obj_id, data, len, verify_id);

    seaf_warning ("Invalid meta data type: %d.\n", type);
    return FALSE;
}
/*
 * Read fs object @obj_id from the store and verify it, whatever its
 * type.  Sets *io_error when the object cannot be read.  The empty
 * object is always valid.
 */
gboolean
seaf_fs_manager_verify_object (SeafFSManager *mgr,
                               const char *repo_id,
                               int version,
                               const char *obj_id,
                               gboolean verify_id,
                               gboolean *io_error)
{
    void *data = NULL;
    int len = 0;
    gboolean ok;

    if (memcmp (obj_id, EMPTY_SHA1, 40) == 0) {
        return TRUE;
    }

    if (seaf_obj_store_read_obj (mgr->obj_store, repo_id, version,
                                 obj_id, &data, &len) < 0) {
        seaf_warning ("[fs mgr] Failed to read object %s:%s.\n", repo_id, obj_id);
        *io_error = TRUE;
        return FALSE;
    }

    /* version 0 is the binary format; anything newer is compressed JSON. */
    ok = (version == 0) ? verify_fs_object_v0 (obj_id, data, len, verify_id)
                        : verify_fs_object_json (obj_id, data, len);
    g_free (data);

    return ok;
}
/* Map a repo version to the dir object version it stores:
 * v0 repos keep v0 dirs, everything newer uses the current version. */
int
dir_version_from_repo_version (int repo_version)
{
    return (repo_version == 0) ? 0 : CURRENT_DIR_OBJ_VERSION;
}
/* Map a repo version to the file object version it stores:
 * v0 repos keep v0 files, everything newer uses the current version. */
int
seafile_version_from_repo_version (int repo_version)
{
    return (repo_version == 0) ? 0 : CURRENT_SEAFILE_OBJ_VERSION;
}
/* Remove the whole fs object store for @store_id.
 * Thin wrapper over the object store layer. */
int
seaf_fs_manager_remove_store (SeafFSManager *mgr,
                              const char *store_id)
{
    int rc = seaf_obj_store_remove_store (mgr->obj_store, store_id);
    return rc;
}
/*
 * Build a SeafileFileCountInfo (file count, dir count, total size) for
 * the dir that @path names under @root_id.
 *
 * Returns NULL with @error set when @path does not resolve to a dir;
 * also returns NULL (without setting @error) when counting fails.
 */
GObject *
seaf_fs_manager_get_file_count_info_by_path (SeafFSManager *mgr,
                                             const char *repo_id,
                                             int version,
                                             const char *root_id,
                                             const char *path,
                                             GError **error)
{
    char *dir_id = NULL;
    gint64 file_count = 0, dir_count = 0, size = 0;
    SeafileFileCountInfo *info = NULL;

    dir_id = seaf_fs_manager_get_seafdir_id_by_path (mgr,
                                                     repo_id,
                                                     version,
                                                     root_id,
                                                     path, NULL);
    if (!dir_id) {
        seaf_warning ("Path %s doesn't exist or is not a dir in repo %.10s.\n",
                      path, repo_id);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad path");
        goto out;
    }

    if (get_file_count_info (mgr, repo_id, version,
                             dir_id, &dir_count, &file_count, &size) < 0) {
        seaf_warning ("Failed to get count info from path %s in repo %.10s.\n",
                      path, repo_id);
        goto out;
    }

    info = g_object_new (SEAFILE_TYPE_FILE_COUNT_INFO,
                         "file_count", file_count,
                         "dir_count", dir_count,
                         "size", size, NULL);
out:
    g_free (dir_id);

    return (GObject *)info;
}
/*
 * Depth-first search under dir @id for entries whose name contains
 * @str (case-insensitive, via strcasestr), prepending a SearchResult
 * for each hit to *file_list.  @path is the display-path prefix for
 * results.  Returns 0 on success, -1 when a subdir cannot be loaded.
 */
static int
search_files_recursive (SeafFSManager *mgr,
                        const char *repo_id,
                        const char *path,
                        const char *id,
                        const char *str,
                        int version,
                        GList **file_list)
{
    SeafDir *dir;
    GList *p;
    SeafDirent *seaf_dent;
    int ret = 0;
    char *full_path = NULL;

    dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, id);
    if (!dir) {
        seaf_warning ("[fs-mgr]get seafdir %s failed\n", id);
        return -1;
    }

    for (p = dir->entries; p; p = p->next) {
        seaf_dent = (SeafDirent *)p->data;
        full_path = g_strconcat (path, "/", seaf_dent->name, NULL);

        if (seaf_dent->name && strcasestr (seaf_dent->name, str) != NULL) {
            /* The name matches: record a result (for dirs too). */
            SearchResult *sr = g_new0(SearchResult, 1);
            sr->path = g_strdup (full_path);
            sr->size = seaf_dent->size;
            sr->mtime = seaf_dent->mtime;
            *file_list = g_list_prepend (*file_list, sr);
            if (S_ISDIR(seaf_dent->mode)) {
                sr->is_dir = TRUE;
            }
        }
        /* Recurse into subdirs whether or not their name matched. */
        if (S_ISDIR(seaf_dent->mode)) {
            if (search_files_recursive (mgr, repo_id, full_path,
                                        seaf_dent->id, str,
                                        version, file_list) < 0) {
                g_free (full_path);
                ret = -1;
                break;
            }
        }
        g_free (full_path);
    }

    seaf_dir_free (dir);
    return ret;
}
/*
 * Search the head commit of @repo_id for entries whose name contains
 * @str (case-insensitive), starting at @path ("/" or NULL means the
 * repo root).  Returns a list of SearchResult* (may be NULL/empty).
 */
GList *
seaf_fs_manager_search_files_by_path (SeafFSManager *mgr,
                                      const char *repo_id,
                                      const char *path,
                                      const char *str)
{
    GList *file_list = NULL;
    SeafCommit *head = NULL;
    SeafRepo *repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        seaf_warning ("Failed to find repo %s\n", repo_id);
        goto out;
    }

    head = seaf_commit_manager_get_commit (seaf->commit_mgr,repo->id, repo->version, repo->head->commit_id);
    if (!head) {
        seaf_warning ("Failed to find commit %s\n", repo->head->commit_id);
        goto out;
    }

    if (!path || g_strcmp0 (path, "/") == 0) {
        search_files_recursive (mgr, repo->store_id, "", head->root_id,
                                str, repo->version, &file_list);
    } else {
        char *dir_id = seaf_fs_manager_get_seafdir_id_by_path (mgr, repo->store_id, repo->version,
                                                               head->root_id, path, NULL);
        if (!dir_id) {
            seaf_warning ("Path %s doesn't exist or is not a dir in repo %.10s.\n", path, repo->store_id);
            goto out;
        }
        search_files_recursive (mgr, repo->store_id, path, dir_id,
                                str, repo->version, &file_list);
        g_free (dir_id);
    }

out:
    /* Fix: guard the unrefs.  On the early error paths repo and/or head
     * are NULL, and they must not be handed to the unref helpers. */
    if (repo)
        seaf_repo_unref (repo);
    if (head)
        seaf_commit_unref (head);
    return file_list;
}
================================================
FILE: common/fs-mgr.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef SEAF_FILE_MGR_H
#define SEAF_FILE_MGR_H
#include
#include "seafile-object.h"
#include "obj-store.h"
#include "cdc/cdc.h"
#include "../common/seafile-crypt.h"
#define CURRENT_DIR_OBJ_VERSION 1
#define CURRENT_SEAFILE_OBJ_VERSION 1
typedef struct _SeafFSManager SeafFSManager;
typedef struct _SeafFSObject SeafFSObject;
typedef struct _Seafile Seafile;
typedef struct _SeafDir SeafDir;
typedef struct _SeafDirent SeafDirent;
/* fs object types.  The numeric values are serialized on disk (in both
 * the v0 binary layout and the JSON "type" member), so the order must
 * not change. */
typedef enum {
    SEAF_METADATA_TYPE_INVALID,
    SEAF_METADATA_TYPE_FILE,
    SEAF_METADATA_TYPE_LINK,
    SEAF_METADATA_TYPE_DIR,
} SeafMetadataType;
/* Common to seafile and seafdir objects: a type tag so either can be
 * handled through a SeafFSObject* (see seaf_fs_object_free). */
struct _SeafFSObject {
    int type;   /* a SeafMetadataType value */
};
/* In-memory file object: an ordered list of block ids.
 * Reference counted via seafile_ref/seafile_unref. */
struct _Seafile {
    SeafFSObject object;
    int version;
    char file_id[41];    /* hex SHA-1 object id, NUL-terminated */
    guint64 file_size;   /* total size in bytes */
    guint32 n_blocks;    /* number of entries in blk_sha1s */
    char **blk_sha1s;    /* hex block ids */
    int ref_count;
};
/* One hit returned by seaf_fs_manager_search_files_by_path. */
typedef struct SearchResult {
    char *path;        /* full path of the matched entry */
    gint64 size;       /* size from the dirent */
    gint64 mtime;
    gboolean is_dir;
} SearchResult;
void
seafile_ref (Seafile *seafile);
void
seafile_unref (Seafile *seafile);
int
seafile_save (SeafFSManager *fs_mgr,
const char *repo_id,
int version,
Seafile *file);
#define SEAF_DIR_NAME_LEN 256
/* A single directory entry. */
struct _SeafDirent {
    int version;
    guint32 mode;        /* POSIX mode bits (S_ISREG/S_ISDIR) */
    char id[41];         /* hex object id, NUL-terminated */
    guint32 name_len;
    char *name;

    /* attributes for version > 0 */
    gint64 mtime;
    char *modifier;             /* for files only */
    gint64 size;                /* for files only */
};
struct _SeafDir {
SeafFSObject object;
int version;
char dir_id[41];
GList *entries;
/* data in on-disk format. */
void *ondisk;
int ondisk_size;
};
SeafDir *
seaf_dir_new (const char *id, GList *entries, int version);
void
seaf_dir_free (SeafDir *dir);
SeafDir *
seaf_dir_from_data (const char *dir_id, uint8_t *data, int len,
gboolean is_json);
void *
seaf_dir_to_data (SeafDir *dir, int *len);
int
seaf_dir_save (SeafFSManager *fs_mgr,
const char *repo_id,
int version,
SeafDir *dir);
SeafDirent *
seaf_dirent_new (int version, const char *sha1, int mode, const char *name,
gint64 mtime, const char *modifier, gint64 size);
void
seaf_dirent_free (SeafDirent *dent);
SeafDirent *
seaf_dirent_dup (SeafDirent *dent);
int
seaf_metadata_type_from_data (const char *obj_id,
uint8_t *data, int len, gboolean is_json);
/* Parse an fs object without knowing its type. */
SeafFSObject *
seaf_fs_object_from_data (const char *obj_id,
uint8_t *data, int len,
gboolean is_json);
void
seaf_fs_object_free (SeafFSObject *obj);
/* A set of block ids, kept both as a hash (for dedup/membership tests)
 * and as an array (to preserve insertion order). */
typedef struct {
    /* TODO: GHashTable may be inefficient when we have large number of IDs. */
    GHashTable *block_hash;
    GPtrArray *block_ids;
    uint32_t n_blocks;         /* total ids inserted */
    uint32_t n_valid_blocks;
} BlockList;
BlockList *
block_list_new ();
void
block_list_free (BlockList *bl);
void
block_list_insert (BlockList *bl, const char *block_id);
/* Return a blocklist containing block ids which are in @bl1 but
* not in @bl2.
*/
BlockList *
block_list_difference (BlockList *bl1, BlockList *bl2);
struct _SeafileSession;
typedef struct _SeafFSManagerPriv SeafFSManagerPriv;

/* Manager for fs (file/dir) objects, backed by an object store. */
struct _SeafFSManager {
    struct _SeafileSession *seaf;     /* owning session */
    struct SeafObjStore *obj_store;   /* underlying object storage */
    SeafFSManagerPriv *priv;
};
SeafFSManager *
seaf_fs_manager_new (struct _SeafileSession *seaf,
const char *seaf_dir);
int
seaf_fs_manager_init (SeafFSManager *mgr);
#ifndef SEAFILE_SERVER
int
seaf_fs_manager_checkout_file (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *file_id,
const char *file_path,
guint32 mode,
guint64 mtime,
struct SeafileCrypt *crypt,
const char *in_repo_path,
const char *conflict_head_id,
gboolean force_conflict,
gboolean *conflicted,
const char *email);
#endif /* not SEAFILE_SERVER */
/**
* Check in blocks and create seafile/symlink object.
* Returns sha1 id for the seafile/symlink object in @sha1 parameter.
*/
int
seaf_fs_manager_index_file_blocks (SeafFSManager *mgr,
const char *repo_id,
int version,
GList *paths,
GList *blockids,
unsigned char sha1[],
gint64 file_size);
int
seaf_fs_manager_index_raw_blocks (SeafFSManager *mgr,
const char *repo_id,
int version,
GList *paths,
GList *blockids);
int
seaf_fs_manager_index_existed_file_blocks (SeafFSManager *mgr,
const char *repo_id,
int version,
GList *blockids,
unsigned char sha1[],
gint64 file_size);
int
seaf_fs_manager_index_blocks (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *file_path,
unsigned char sha1[],
gint64 *size,
SeafileCrypt *crypt,
gboolean write_data,
gboolean use_cdc,
gint64 *indexed);
Seafile *
seaf_fs_manager_get_seafile (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *file_id);
SeafDir *
seaf_fs_manager_get_seafdir (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *dir_id);
/* Make sure entries in the returned dir is sorted in descending order.
*/
SeafDir *
seaf_fs_manager_get_seafdir_sorted (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *dir_id);
SeafDir *
seaf_fs_manager_get_seafdir_sorted_by_path (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *root_id,
const char *path);
int
seaf_fs_manager_populate_blocklist (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *root_id,
BlockList *bl);
/*
* For dir object, set *stop to TRUE to stop traversing the subtree.
*/
typedef gboolean (*TraverseFSTreeCallback) (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *obj_id,
int type,
void *user_data,
gboolean *stop);
int
seaf_fs_manager_traverse_tree (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *root_id,
TraverseFSTreeCallback callback,
void *user_data,
gboolean skip_errors);
typedef gboolean (*TraverseFSPathCallback) (SeafFSManager *mgr,
const char *path,
SeafDirent *dent,
void *user_data,
gboolean *stop);
int
seaf_fs_manager_traverse_path (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *root_id,
const char *dir_path,
TraverseFSPathCallback callback,
void *user_data);
gboolean
seaf_fs_manager_object_exists (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *id);
void
seaf_fs_manager_delete_object (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *id);
gint64
seaf_fs_manager_get_file_size (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *file_id);
gint64
seaf_fs_manager_get_fs_size (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *root_id);
#ifndef SEAFILE_SERVER
int
seafile_write_chunk (const char *repo_id,
int version,
CDCDescriptor *chunk,
SeafileCrypt *crypt,
uint8_t *checksum,
gboolean write_data);
int
seafile_check_write_chunk (CDCDescriptor *chunk,
uint8_t *sha1,
gboolean write_data);
#endif /* SEAFILE_SERVER */
uint32_t
calculate_chunk_size (uint64_t total_size);
int
seaf_fs_manager_count_fs_files (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *root_id);
SeafDir *
seaf_fs_manager_get_seafdir_by_path(SeafFSManager *mgr,
const char *repo_id,
int version,
const char *root_id,
const char *path,
GError **error);
char *
seaf_fs_manager_get_seafile_id_by_path (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *root_id,
const char *path,
GError **error);
char *
seaf_fs_manager_path_to_obj_id (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *root_id,
const char *path,
guint32 *mode,
GError **error);
char *
seaf_fs_manager_get_seafdir_id_by_path (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *root_id,
const char *path,
GError **error);
SeafDirent *
seaf_fs_manager_get_dirent_by_path (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *root_id,
const char *path,
GError **error);
/* Check object integrity. */
gboolean
seaf_fs_manager_verify_seafdir (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *dir_id,
gboolean verify_id,
gboolean *io_error);
gboolean
seaf_fs_manager_verify_seafile (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *file_id,
gboolean verify_id,
gboolean *io_error);
gboolean
seaf_fs_manager_verify_object (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *obj_id,
gboolean verify_id,
gboolean *io_error);
int
dir_version_from_repo_version (int repo_version);
int
seafile_version_from_repo_version (int repo_version);
struct _CDCFileDescriptor;
void
seaf_fs_manager_calculate_seafile_id_json (int repo_version,
struct _CDCFileDescriptor *cdc,
guint8 *file_id_sha1);
int
seaf_fs_manager_remove_store (SeafFSManager *mgr,
const char *store_id);
GObject *
seaf_fs_manager_get_file_count_info_by_path (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *root_id,
const char *path,
GError **error);
GList *
seaf_fs_manager_search_files_by_path (SeafFSManager *mgr,
const char *repo_id,
const char *path,
const char *str);
#endif
================================================
FILE: common/group-mgr.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#include "common.h"
#include "seafile-session.h"
#include "seaf-db.h"
#include "group-mgr.h"
#include "org-mgr.h"
#include "seaf-utils.h"
#include "utils.h"
#include "log.h"
/* Connection-pool size for the sqlite group database. */
#define DEFAULT_MAX_CONNECTIONS 100

/* Private state: the backing database handle and the (configurable)
 * name of the main group table. */
struct _CcnetGroupManagerPriv {
    CcnetDB *db;
    const char *table_name;
};

static int open_db (CcnetGroupManager *manager);
static int check_db_table (CcnetGroupManager *manager, CcnetDB *db);
/* Allocate a group manager bound to @session. The priv struct (db handle
 * and table name) is filled in later by ccnet_group_manager_prepare(). */
CcnetGroupManager* ccnet_group_manager_new (SeafileSession *session)
{
    CcnetGroupManager *mgr = g_new0 (CcnetGroupManager, 1);

    mgr->priv = g_new0 (CcnetGroupManagerPriv, 1);
    mgr->session = session;

    return mgr;
}
/* Nothing to initialize at this stage; kept for lifecycle symmetry
 * with the other managers. Always succeeds. */
int
ccnet_group_manager_init (CcnetGroupManager *manager)
{
    return 0;
}
/* Resolve the group table name (overridable through the
 * SEAFILE_MYSQL_DB_GROUP_TABLE_NAME environment variable, default "Group")
 * and open the backing database. Returns 0 on success, -1 on failure. */
int
ccnet_group_manager_prepare (CcnetGroupManager *manager)
{
    const char *env_name = g_getenv ("SEAFILE_MYSQL_DB_GROUP_TABLE_NAME");

    if (env_name && g_strcmp0 (env_name, "") != 0)
        manager->priv->table_name = g_strdup (env_name);
    else
        manager->priv->table_name = g_strdup ("Group");

    return open_db (manager);
}
/* No background tasks to launch for the group manager; intentionally empty. */
void ccnet_group_manager_start (CcnetGroupManager *manager)
{
}
/* Open (creating if needed) the sqlite group database at
 * <ccnet_dir>/GroupMgr/groupmgr.db. Returns NULL on failure. */
static CcnetDB *
open_sqlite_db (CcnetGroupManager *manager)
{
    CcnetDB *db = NULL;
    char *dir = g_build_filename (manager->session->ccnet_dir, "GroupMgr", NULL);

    if (checkdir_with_mkdir (dir) < 0) {
        ccnet_error ("Cannot open db dir %s: %s\n", dir,
                     strerror(errno));
        g_free (dir);
        return NULL;
    }
    g_free (dir);

    char *file = g_build_filename (manager->session->ccnet_dir, "GroupMgr",
                                   "groupmgr.db", NULL);
    db = seaf_db_new_sqlite (file, DEFAULT_MAX_CONNECTIONS);
    g_free (file);

    return db;
}
/* Select the backing database (a private sqlite file, or the shared ccnet
 * connection for mysql/pgsql), store it in priv, and create the tables when
 * configured to do so. Returns 0 on success, -1 on failure. */
static int
open_db (CcnetGroupManager *manager)
{
    CcnetDB *db = NULL;
    int type = seaf_db_type (manager->session->ccnet_db);

    if (type == SEAF_DB_TYPE_SQLITE)
        db = open_sqlite_db (manager);
    else if (type == SEAF_DB_TYPE_PGSQL || type == SEAF_DB_TYPE_MYSQL)
        db = manager->session->ccnet_db;

    if (!db)
        return -1;
    manager->priv->db = db;

    /* Table creation is opt-in, except on pgsql where it always runs. */
    gboolean need_tables = manager->session->ccnet_create_tables ||
                           seaf_db_type (db) == SEAF_DB_TYPE_PGSQL;
    if (need_tables && check_db_table (manager, db) < 0) {
        ccnet_warning ("Failed to create group db tables.\n");
        return -1;
    }

    return 0;
}
/* -------- Group Database Management ---------------- */
/* -------- Group Database Management ---------------- */

/* Create the group-related tables (the main group table, GroupUser,
 * GroupDNPair, GroupStructure) if they don't exist, using DDL appropriate
 * for the backing database type. Returns 0 on success, -1 on failure.
 *
 * Fix: the original returned -1 directly after any failing query past the
 * first one, leaking the @group_sql GString on every such path. All exits
 * now go through a single cleanup label. */
static int check_db_table (CcnetGroupManager *manager, CcnetDB *db)
{
    int ret = -1;
    char *sql;
    GString *group_sql = g_string_new ("");
    const char *table_name = manager->priv->table_name;
    int db_type = seaf_db_type (db);

    if (db_type == SEAF_DB_TYPE_MYSQL) {
        g_string_printf (group_sql,
                         "CREATE TABLE IF NOT EXISTS `%s` (`group_id` BIGINT "
                         " PRIMARY KEY AUTO_INCREMENT, `group_name` VARCHAR(255),"
                         " `creator_name` VARCHAR(255), `timestamp` BIGINT,"
                         " `type` VARCHAR(32), `parent_group_id` INTEGER)"
                         "ENGINE=INNODB", table_name);
        if (seaf_db_query (db, group_sql->str) < 0)
            goto out;

        sql = "CREATE TABLE IF NOT EXISTS `GroupUser` ( "
            "`id` BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, `group_id` BIGINT,"
            " `user_name` VARCHAR(255), `is_staff` tinyint, UNIQUE INDEX"
            " (`group_id`, `user_name`), INDEX (`user_name`))"
            "ENGINE=INNODB";
        if (seaf_db_query (db, sql) < 0)
            goto out;

        sql = "CREATE TABLE IF NOT EXISTS GroupDNPair ( "
            "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, group_id INTEGER,"
            " dn VARCHAR(255))ENGINE=INNODB";
        if (seaf_db_query (db, sql) < 0)
            goto out;

        sql = "CREATE TABLE IF NOT EXISTS GroupStructure ( "
            "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, group_id INTEGER, "
            "path VARCHAR(1024), UNIQUE INDEX(group_id))ENGINE=INNODB";
        if (seaf_db_query (db, sql) < 0)
            goto out;
    } else if (db_type == SEAF_DB_TYPE_SQLITE) {
        g_string_printf (group_sql,
                         "CREATE TABLE IF NOT EXISTS `%s` (`group_id` INTEGER"
                         " PRIMARY KEY AUTOINCREMENT, `group_name` VARCHAR(255),"
                         " `creator_name` VARCHAR(255), `timestamp` BIGINT,"
                         " `type` VARCHAR(32), `parent_group_id` INTEGER)", table_name);
        if (seaf_db_query (db, group_sql->str) < 0)
            goto out;

        sql = "CREATE TABLE IF NOT EXISTS `GroupUser` (`group_id` INTEGER, "
            "`user_name` VARCHAR(255), `is_staff` tinyint)";
        if (seaf_db_query (db, sql) < 0)
            goto out;

        /* sqlite has no inline UNIQUE INDEX syntax; create indexes separately. */
        sql = "CREATE UNIQUE INDEX IF NOT EXISTS groupid_username_indx on "
            "`GroupUser` (`group_id`, `user_name`)";
        if (seaf_db_query (db, sql) < 0)
            goto out;

        sql = "CREATE INDEX IF NOT EXISTS username_indx on "
            "`GroupUser` (`user_name`)";
        if (seaf_db_query (db, sql) < 0)
            goto out;

        sql = "CREATE TABLE IF NOT EXISTS GroupDNPair (group_id INTEGER,"
            " dn VARCHAR(255))";
        if (seaf_db_query (db, sql) < 0)
            goto out;

        sql = "CREATE TABLE IF NOT EXISTS GroupStructure (group_id INTEGER PRIMARY KEY, "
            "path VARCHAR(1024))";
        if (seaf_db_query (db, sql) < 0)
            goto out;

        sql = "CREATE INDEX IF NOT EXISTS path_indx on "
            "`GroupStructure` (`path`)";
        if (seaf_db_query (db, sql) < 0)
            goto out;
    } else if (db_type == SEAF_DB_TYPE_PGSQL) {
        g_string_printf (group_sql,
                         "CREATE TABLE IF NOT EXISTS \"%s\" (group_id SERIAL"
                         " PRIMARY KEY, group_name VARCHAR(255),"
                         " creator_name VARCHAR(255), timestamp BIGINT,"
                         " type VARCHAR(32), parent_group_id INTEGER)", table_name);
        if (seaf_db_query (db, group_sql->str) < 0)
            goto out;

        sql = "CREATE TABLE IF NOT EXISTS GroupUser (group_id INTEGER,"
            " user_name VARCHAR(255), is_staff smallint, UNIQUE "
            " (group_id, user_name))";
        if (seaf_db_query (db, sql) < 0)
            goto out;

        sql = "CREATE TABLE IF NOT EXISTS GroupDNPair (group_id INTEGER,"
            " dn VARCHAR(255))";
        if (seaf_db_query (db, sql) < 0)
            goto out;

        sql = "CREATE TABLE IF NOT EXISTS GroupStructure (group_id INTEGER PRIMARY KEY, "
            "path VARCHAR(1024))";
        if (seaf_db_query (db, sql) < 0)
            goto out;
    }

    ret = 0;

out:
    g_string_free (group_sql, TRUE);
    return ret;
}
/* Row callback: copy the first column (group_id) into the int pointed to
 * by @data. Returning FALSE stops iteration after the first row. */
static gboolean
get_group_id_cb (CcnetDBRow *row, void *data)
{
    int *out = data;

    *out = seaf_db_row_get_column_int (row, 0);
    return FALSE;
}
/* Row callback: duplicate the first column (path) into the char* pointed
 * to by @data (caller frees). Stops after the first row. */
static gboolean
get_group_path_cb (CcnetDBRow *row, void *data)
{
    char **out = (char **)data;

    *out = g_strdup (seaf_db_row_get_column_text (row, 0));
    return FALSE;
}
/* Create a group inside a single DB transaction:
 *   1. insert the group row (name, lower-cased creator, timestamp, parent);
 *   2. read back the generated group_id;
 *   3. add the creator as staff member (unless "system admin");
 *   4. record the group's ancestor path in GroupStructure
 *      (parent_group_id == -1: top-level group; > 0: append to parent's path;
 *       0: no structure row at all).
 * Returns the new group id, or -1 with @error set. */
static int
create_group_common (CcnetGroupManager *mgr,
                     const char *group_name,
                     const char *user_name,
                     int parent_group_id,
                     GError **error)
{
    CcnetDB *db = mgr->priv->db;
    gint64 now = get_current_time();
    GString *sql = g_string_new ("");
    const char *table_name = mgr->priv->table_name;
    int group_id = -1;
    CcnetDBTrans *trans = seaf_db_begin_transaction (db);
    /* user names are stored lower-cased */
    char *user_name_l = g_ascii_strdown (user_name, -1);

    /* pgsql uses double quotes for identifiers, mysql/sqlite use backticks */
    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)
        g_string_printf (sql,
                         "INSERT INTO \"%s\"(group_name, "
                         "creator_name, timestamp, parent_group_id) VALUES(?, ?, ?, ?)", table_name);
    else
        g_string_printf (sql,
                         "INSERT INTO `%s`(group_name, "
                         "creator_name, timestamp, parent_group_id) VALUES(?, ?, ?, ?)", table_name);
    if (seaf_db_trans_query (trans, sql->str, 4,
                             "string", group_name, "string", user_name_l,
                             "int64", now, "int", parent_group_id) < 0)
        goto error;

    /* Recover the auto-generated id by matching the unique
     * (name, creator, timestamp) triple we just inserted. */
    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)
        g_string_printf (sql,
                         "SELECT group_id FROM \"%s\" WHERE "
                         "group_name = ? AND creator_name = ? "
                         "AND timestamp = ?", table_name);
    else
        g_string_printf (sql,
                         "SELECT group_id FROM `%s` WHERE "
                         "group_name = ? AND creator_name = ? "
                         "AND timestamp = ?", table_name);
    seaf_db_trans_foreach_selected_row (trans, sql->str, get_group_id_cb,
                                        &group_id, 3, "string", group_name,
                                        "string", user_name_l, "int64", now);
    if (group_id < 0)
        goto error;

    /* "system admin" creates groups without becoming a member. */
    if (g_strcmp0(user_name, "system admin") != 0) {
        g_string_printf (sql, "INSERT INTO GroupUser (group_id, user_name, is_staff) VALUES (?, ?, ?)");
        if (seaf_db_trans_query (trans, sql->str, 3,
                                 "int", group_id, "string", user_name_l,
                                 "int", 1) < 0)
            goto error;
    }

    if (parent_group_id == -1) {
        /* Top-level group: its path is just its own id. */
        g_string_printf (sql, "INSERT INTO GroupStructure (group_id, path) VALUES (?,'%d')", group_id);
        if (seaf_db_trans_query (trans, sql->str, 1, "int", group_id) < 0)
            goto error;
    } else if (parent_group_id > 0) {
        /* Child group: path = parent's path + ", " + own id. */
        g_string_printf (sql, "SELECT path FROM GroupStructure WHERE group_id=?");
        char *path = NULL;
        seaf_db_trans_foreach_selected_row (trans, sql->str, get_group_path_cb,
                                            &path, 1, "int", parent_group_id);
        if (!path)
            goto error;
        /* NOTE(review): @path is spliced into the SQL text, not bound as a
         * parameter — it comes from our own GroupStructure table, but verify
         * it can never carry untrusted content. */
        g_string_printf (sql, "INSERT INTO GroupStructure (group_id, path) VALUES (?, '%s, %d')", path, group_id);
        if (seaf_db_trans_query (trans, sql->str, 1, "int", group_id) < 0) {
            g_free (path);
            goto error;
        }
        g_free (path);
    }

    seaf_db_commit (trans);
    seaf_db_trans_close (trans);
    g_string_free (sql, TRUE);
    g_free (user_name_l);
    return group_id;

error:
    seaf_db_rollback (trans);
    seaf_db_trans_close (trans);
    g_set_error (error, CCNET_DOMAIN, 0, "Failed to create group");
    g_string_free (sql, TRUE);
    g_free (user_name_l);
    return -1;
}
/* Public entry point for group creation; all the work happens in
 * create_group_common(). Returns the new group id, or -1 with @error set. */
int ccnet_group_manager_create_group (CcnetGroupManager *mgr,
                                      const char *group_name,
                                      const char *user_name,
                                      int parent_group_id,
                                      GError **error)
{
    int group_id = create_group_common (mgr, group_name, user_name,
                                        parent_group_id, error);
    return group_id;
}
/* static gboolean */
/* duplicate_org_group_name (CcnetGroupManager *mgr, */
/* int org_id, */
/* const char *group_name) */
/* { */
/* GList *org_groups = NULL, *ptr; */
/* CcnetOrgManager *org_mgr = seaf->org_mgr; */
/* org_groups = ccnet_org_manager_get_org_groups (org_mgr, org_id, -1, -1); */
/* if (!org_groups) */
/* return FALSE; */
/* for (ptr = org_groups; ptr; ptr = ptr->next) { */
/* int group_id = (int)(long)ptr->data; */
/* CcnetGroup *group = ccnet_group_manager_get_group (mgr, group_id, */
/* NULL); */
/* if (!group) */
/* continue; */
/* if (g_strcmp0 (group_name, ccnet_group_get_group_name(group)) == 0) { */
/* g_list_free (org_groups); */
/* g_object_unref (group); */
/* return TRUE; */
/* } else { */
/* g_object_unref (group); */
/* } */
/* } */
/* g_list_free (org_groups); */
/* return FALSE; */
/* } */
/* Create a group and register it under organization @org_id.
 * Returns the new group id, or -1 with @error set.
 *
 * Fix: the original unconditionally called g_set_error() after
 * create_group_common() / ccnet_org_manager_add_org_group() failed, but
 * both of those may already have populated *error. Overwriting a set
 * GError triggers GLib's "GError set over the top of a previous GError"
 * warning and leaks the original error, so only set it when still unset. */
int ccnet_group_manager_create_org_group (CcnetGroupManager *mgr,
                                          int org_id,
                                          const char *group_name,
                                          const char *user_name,
                                          int parent_group_id,
                                          GError **error)
{
    CcnetOrgManager *org_mgr = seaf->org_mgr;

    int group_id = create_group_common (mgr, group_name, user_name,
                                        parent_group_id, error);
    if (group_id < 0) {
        if (error && *error == NULL)
            g_set_error (error, CCNET_DOMAIN, 0, "Failed to create org group.");
        return -1;
    }

    /* NOTE(review): if this step fails the group created above is not
     * rolled back — existing behavior, kept as-is. */
    if (ccnet_org_manager_add_org_group (org_mgr, org_id, group_id,
                                         error) < 0) {
        if (error && *error == NULL)
            g_set_error (error, CCNET_DOMAIN, 0, "Failed to create org group.");
        return -1;
    }

    return group_id;
}
/* Return TRUE iff @user_name is a staff member of @group_id.
 * When @in_structure is TRUE, staff membership in any ancestor group
 * (per the GroupStructure path) also counts. Returns FALSE on DB error. */
static gboolean
check_group_staff (CcnetDB *db, int group_id, const char *user_name, gboolean in_structure)
{
    gboolean exists, err;

    /* Fast path: only check direct membership of this group. */
    if (!in_structure) {
        exists = seaf_db_statement_exists (db, "SELECT group_id FROM GroupUser WHERE "
                                           "group_id = ? AND user_name = ? AND "
                                           "is_staff = 1", &err,
                                           2, "int", group_id, "string", user_name);
        if (err) {
            ccnet_warning ("DB error when check staff user exist in GroupUser.\n");
            return FALSE;
        }
        return exists;
    }

    /* Look up the ancestor path ("id, id, ..."); no path means the group
     * is not part of a hierarchy, so fall back to the direct check. */
    GString *sql = g_string_new("");
    g_string_printf (sql, "SELECT path FROM GroupStructure WHERE group_id=?");
    char *path = seaf_db_statement_get_string (db, sql->str, 1, "int", group_id);
    if (!path) {
        exists = seaf_db_statement_exists (db, "SELECT group_id FROM GroupUser WHERE "
                                           "group_id = ? AND user_name = ? AND "
                                           "is_staff = 1", &err,
                                           2, "int", group_id, "string", user_name);
    } else {
        /* @path is a DB-generated, comma-separated id list spliced into the
         * IN clause; it is not user input. */
        g_string_printf (sql, "SELECT group_id FROM GroupUser WHERE "
                         "group_id IN (%s) AND user_name = ? AND "
                         "is_staff = 1", path);
        exists = seaf_db_statement_exists (db, sql->str, &err,
                                           1, "string", user_name);
    }
    g_string_free (sql, TRUE);
    g_free (path);
    if (err) {
        ccnet_warning ("DB error when check staff user exist in GroupUser.\n");
        return FALSE;
    }
    return exists;
}
/* Delete a group and its membership/structure rows.
 * Unless @remove_anyway is TRUE, refuse to delete a group that still has
 * child groups. Returns 0 on success, -1 on failure. */
int ccnet_group_manager_remove_group (CcnetGroupManager *mgr,
                                      int group_id,
                                      gboolean remove_anyway,
                                      GError **error)
{
    CcnetDB *db = mgr->priv->db;
    GString *sql = g_string_new ("");
    gboolean exists, err;
    const char *table_name = mgr->priv->table_name;

    /* No permission check here, since both group staff and seahub staff
     * can remove group.
     */

    if (remove_anyway != TRUE) {
        /* A group with children must have them removed first. */
        if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)
            g_string_printf (sql, "SELECT 1 FROM \"%s\" WHERE parent_group_id=?", table_name);
        else
            g_string_printf (sql, "SELECT 1 FROM `%s` WHERE parent_group_id=?", table_name);
        exists = seaf_db_statement_exists (db, sql->str, &err, 1, "int", group_id);
        if (err) {
            ccnet_warning ("DB error when check remove group.\n");
            g_string_free (sql, TRUE);
            return -1;
        }
        if (exists) {
            ccnet_warning ("Failed to remove group [%d] whose child group must be removed first.\n", group_id);
            g_string_free (sql, TRUE);
            return -1;
        }
    }

    /* Delete the group row plus its membership and structure records;
     * individual delete failures are ignored (best effort). */
    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)
        g_string_printf (sql, "DELETE FROM \"%s\" WHERE group_id=?", table_name);
    else
        g_string_printf (sql, "DELETE FROM `%s` WHERE group_id=?", table_name);
    seaf_db_statement_query (db, sql->str, 1, "int", group_id);

    g_string_printf (sql, "DELETE FROM GroupUser WHERE group_id=?");
    seaf_db_statement_query (db, sql->str, 1, "int", group_id);

    g_string_printf (sql, "DELETE FROM GroupStructure WHERE group_id=?");
    seaf_db_statement_query (db, sql->str, 1, "int", group_id);

    g_string_free (sql, TRUE);
    return 0;
}
/* TRUE iff a row with @group_id exists in the main group table;
 * FALSE on a DB error as well. */
static gboolean
check_group_exists (CcnetGroupManager *mgr, CcnetDB *db, int group_id)
{
    GString *sql = g_string_new ("");
    const char *table_name = mgr->priv->table_name;
    gboolean exists, err;

    /* Only the identifier quoting differs between backends. */
    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)
        g_string_printf (sql, "SELECT group_id FROM \"%s\" WHERE group_id=?", table_name);
    else
        g_string_printf (sql, "SELECT group_id FROM `%s` WHERE group_id=?", table_name);
    exists = seaf_db_statement_exists (db, sql->str, &err, 1, "int", group_id);
    g_string_free (sql, TRUE);

    if (err) {
        ccnet_warning ("DB error when check group exist.\n");
        return FALSE;
    }
    return exists;
}
/* Add @member_name (stored lower-cased, non-staff) to @group_id.
 * Returns 0 on success, -1 with @error set otherwise. */
int ccnet_group_manager_add_member (CcnetGroupManager *mgr,
                                    int group_id,
                                    const char *user_name,
                                    const char *member_name,
                                    GError **error)
{
    CcnetDB *db = mgr->priv->db;

    /* Refuse to add members to a non-existent group. */
    if (!check_group_exists (mgr, db, group_id)) {
        g_set_error (error, CCNET_DOMAIN, 0, "Group not exists");
        return -1;
    }

    char *lowered = g_ascii_strdown (member_name, -1);
    int rc = seaf_db_statement_query (db, "INSERT INTO GroupUser (group_id, user_name, is_staff) VALUES (?, ?, ?)",
                                      3, "int", group_id, "string", lowered,
                                      "int", 0);
    g_free (lowered);

    if (rc < 0) {
        g_set_error (error, CCNET_DOMAIN, 0, "Failed to add member to group");
        return -1;
    }
    return 0;
}
/* Remove @member_name from @group_id on behalf of @user_name.
 * Users cannot remove themselves here (see quit_group for that).
 * Returns 0 on success, -1 with @error set otherwise. */
int ccnet_group_manager_remove_member (CcnetGroupManager *mgr,
                                       int group_id,
                                       const char *user_name,
                                       const char *member_name,
                                       GError **error)
{
    CcnetDB *db = mgr->priv->db;

    if (!check_group_exists (mgr, db, group_id)) {
        g_set_error (error, CCNET_DOMAIN, 0, "Group not exists");
        return -1;
    }

    if (g_strcmp0 (user_name, member_name) == 0) {
        g_set_error (error, CCNET_DOMAIN, 0, "Can not remove myself");
        return -1;
    }

    seaf_db_statement_query (db,
                             "DELETE FROM GroupUser WHERE group_id=? AND user_name=?",
                             2, "int", group_id, "string", member_name);
    return 0;
}
/* Promote @member_name to staff of @group_id. Always reports success;
 * the UPDATE is best-effort. */
int ccnet_group_manager_set_admin (CcnetGroupManager *mgr,
                                   int group_id,
                                   const char *member_name,
                                   GError **error)
{
    seaf_db_statement_query (mgr->priv->db,
                             "UPDATE GroupUser SET is_staff = 1 "
                             "WHERE group_id = ? and user_name = ?",
                             2, "int", group_id, "string", member_name);
    return 0;
}
/* Demote @member_name from staff of @group_id. Always reports success;
 * the UPDATE is best-effort. */
int ccnet_group_manager_unset_admin (CcnetGroupManager *mgr,
                                     int group_id,
                                     const char *member_name,
                                     GError **error)
{
    seaf_db_statement_query (mgr->priv->db,
                             "UPDATE GroupUser SET is_staff = 0 "
                             "WHERE group_id = ? and user_name = ?",
                             2, "int", group_id, "string", member_name);
    return 0;
}
/* Rename @group_id to @group_name. Identifier quoting is the only
 * difference between backends. Always reports success. */
int ccnet_group_manager_set_group_name (CcnetGroupManager *mgr,
                                        int group_id,
                                        const char *group_name,
                                        GError **error)
{
    CcnetDB *db = mgr->priv->db;
    const char *table_name = mgr->priv->table_name;
    GString *sql = g_string_new ("");

    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)
        g_string_printf (sql, "UPDATE \"%s\" SET group_name = ? "
                         "WHERE group_id = ?", table_name);
    else
        g_string_printf (sql, "UPDATE `%s` SET group_name = ? "
                         "WHERE group_id = ?", table_name);
    seaf_db_statement_query (db, sql->str, 2, "string", group_name, "int", group_id);

    g_string_free (sql, TRUE);
    return 0;
}
/* Remove @user_name's own membership from @group_id. Unlike
 * remove_member(), self-removal is the whole point here.
 * Returns 0 on success, -1 with @error set if the group doesn't exist. */
int ccnet_group_manager_quit_group (CcnetGroupManager *mgr,
                                    int group_id,
                                    const char *user_name,
                                    GError **error)
{
    CcnetDB *db = mgr->priv->db;

    if (!check_group_exists (mgr, db, group_id)) {
        g_set_error (error, CCNET_DOMAIN, 0, "Group not exists");
        return -1;
    }

    seaf_db_statement_query (db,
                             "DELETE FROM GroupUser WHERE group_id=? "
                             "AND user_name=?",
                             2, "int", group_id, "string", user_name);
    return 0;
}
/* Row callback: turn one result row
 * (group_id, group_name, creator_name, timestamp, parent_group_id)
 * into a CcnetGroup and append it to the GList* at @data.
 * Returns TRUE to keep iterating over all rows. */
static gboolean
get_user_groups_cb (CcnetDBRow *row, void *data)
{
    GList **plist = data;

    int gid = seaf_db_row_get_column_int (row, 0);
    const char *name = seaf_db_row_get_column_text (row, 1);
    const char *creator = seaf_db_row_get_column_text (row, 2);
    gint64 timestamp = seaf_db_row_get_column_int64 (row, 3);
    int parent_id = seaf_db_row_get_column_int (row, 4);

    CcnetGroup *group = g_object_new (CCNET_TYPE_GROUP,
                                      "id", gid,
                                      "group_name", name,
                                      "creator_name", creator,
                                      "timestamp", timestamp,
                                      "source", "DB",
                                      "parent_group_id", parent_id,
                                      NULL);
    *plist = g_list_append (*plist, group);

    return TRUE;
}
/* Return the ancestor chain of @group_id (including itself) as a list of
 * CcnetGroup, ordered by group_id. A group with no GroupStructure row has
 * no hierarchy, so the list then contains only the group itself.
 * Returns NULL on error (or when the group doesn't exist).
 *
 * Fix: the original leaked the @sql GString on the "group not in
 * structure" branch (it returned without g_string_free). All paths now
 * share a single cleanup. */
GList *
ccnet_group_manager_get_ancestor_groups (CcnetGroupManager *mgr, int group_id)
{
    CcnetDB *db = mgr->priv->db;
    GList *ret = NULL;
    CcnetGroup *group = NULL;
    GString *sql = g_string_new ("");
    const char *table_name = mgr->priv->table_name;
    char *path = NULL;

    g_string_printf (sql, "SELECT path FROM GroupStructure WHERE group_id=?");
    path = seaf_db_statement_get_string (db, sql->str, 1, "int", group_id);
    if (path) {
        /* @path is a DB-generated comma-separated id list, spliced into
         * the IN clause (not user input). */
        if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)
            g_string_printf (sql, "SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM "
                             "\"%s\" g WHERE g.group_id IN(%s) "
                             "ORDER BY g.group_id",
                             table_name, path);
        else
            g_string_printf (sql, "SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM "
                             "`%s` g WHERE g.group_id IN(%s) "
                             "ORDER BY g.group_id",
                             table_name, path);
        if (seaf_db_statement_foreach_row (db, sql->str, get_user_groups_cb, &ret, 0) < 0) {
            ccnet_warning ("Failed to get ancestor groups of group %d\n", group_id);
            ret = NULL;
        }
    } else {
        /* group is not in structure, return itself. */
        group = ccnet_group_manager_get_group (mgr, group_id, NULL);
        if (group) {
            ret = g_list_prepend (ret, group);
        }
    }

    g_string_free (sql, TRUE);
    g_free (path);
    return ret;
}
/* GCompareFunc: order CcnetGroup objects by their "id" property,
 * descending (larger id sorts first). */
static gint
group_comp_func (gconstpointer a, gconstpointer b)
{
    int id_a = 0, id_b = 0;

    g_object_get ((CcnetGroup *)a, "id", &id_a, NULL);
    g_object_get ((CcnetGroup *)b, "id", &id_b, NULL);

    if (id_a > id_b)
        return -1;
    if (id_a < id_b)
        return 1;
    return 0;
}
/* Row callback: accumulate path columns into the GString at @data as a
 * comma-separated list. Iterates over all rows. */
gboolean
get_group_paths_cb (CcnetDBRow *row, void *data)
{
    GString *paths = data;
    const char *path = seaf_db_row_get_column_text (row, 0);

    if (paths->len > 0)
        g_string_append_printf (paths, ", %s", path);
    else
        g_string_append_printf (paths, "%s", path);

    return TRUE;
}
/* Return the groups @user_name belongs to, ordered by group_id descending.
 * When @return_ancestors is TRUE, the result additionally contains every
 * ancestor group (via GroupStructure paths) of the user's nested groups.
 * Returns NULL on error; caller unrefs the CcnetGroup elements. */
GList *
ccnet_group_manager_get_groups_by_user (CcnetGroupManager *mgr,
                                        const char *user_name,
                                        gboolean return_ancestors,
                                        GError **error)
{
    CcnetDB *db = mgr->priv->db;
    GList *groups = NULL, *ret = NULL;
    GList *ptr;
    GString *sql = g_string_new ("");
    const char *table_name = mgr->priv->table_name;
    CcnetGroup *group;
    int parent_group_id = 0, group_id = 0;

    /* Direct memberships first. */
    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)
        g_string_printf (sql,
                         "SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM "
                         "\"%s\" g, GroupUser u WHERE g.group_id = u.group_id AND user_name=? ORDER BY g.group_id DESC",
                         table_name);
    else
        g_string_printf (sql,
                         "SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM "
                         "`%s` g, GroupUser u WHERE g.group_id = u.group_id AND user_name=? ORDER BY g.group_id DESC",
                         table_name);
    if (seaf_db_statement_foreach_row (db,
                                       sql->str,
                                       get_user_groups_cb,
                                       &groups,
                                       1, "string", user_name) < 0) {
        g_string_free (sql, TRUE);
        return NULL;
    }

    if (!return_ancestors) {
        g_string_free (sql, TRUE);
        return groups;
    }

    /* Get ancestor groups in descending order by group_id.*/
    GString *paths = g_string_new ("");
    g_string_erase (sql, 0, -1);
    /* Partition memberships: top-level groups (parent 0) go straight into
     * @ret; nested groups have their ids collected into one
     * "SELECT path ... IN (id, id, ...)" query built up in @sql. */
    for (ptr = groups; ptr; ptr = ptr->next) {
        group = ptr->data;
        g_object_get (group, "parent_group_id", &parent_group_id, NULL);
        g_object_get (group, "id", &group_id, NULL);
        if (parent_group_id != 0) {
            if (g_strcmp0(sql->str, "") == 0)
                g_string_append_printf (sql, "SELECT path FROM GroupStructure WHERE group_id IN (%d", group_id);
            else
                g_string_append_printf (sql, ", %d", group_id);
        } else {
            g_object_ref (group);
            ret = g_list_insert_sorted (ret, group, group_comp_func);
        }
    }

    if (g_strcmp0(sql->str, "") != 0) {
        g_string_append_printf (sql, ")");
        /* Collect all ancestor paths, then load every group id mentioned
         * in them in one query. */
        if (seaf_db_statement_foreach_row (db,
                                           sql->str,
                                           get_group_paths_cb,
                                           paths, 0) < 0) {
            g_list_free_full (ret, g_object_unref);
            ret = NULL;
            goto out;
        }
        if (g_strcmp0(paths->str, "") == 0) {
            ccnet_warning ("Failed to get groups path for user %s\n", user_name);
            g_list_free_full (ret, g_object_unref);
            ret = NULL;
            goto out;
        }

        /* NOTE(review): this query always uses backtick quoting, unlike the
         * branches above — looks wrong for PGSQL; confirm against the pgsql
         * deployment before changing. */
        g_string_printf (sql, "SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM "
                         "`%s` g WHERE g.group_id IN (%s) ORDER BY g.group_id DESC",
                         table_name, paths->str);
        if (seaf_db_statement_foreach_row (db,
                                           sql->str,
                                           get_user_groups_cb,
                                           &ret, 0) < 0) {
            g_list_free_full (ret, g_object_unref);
            ret = NULL;
            goto out;
        }
    }
    ret = g_list_sort (ret, group_comp_func);

out:
    g_string_free (sql, TRUE);
    g_list_free_full (groups, g_object_unref);
    g_string_free (paths, TRUE);
    return ret;
}
/* Row callback: build a single CcnetGroup (creator name lower-cased) from
 * one (group_id, group_name, creator_name, timestamp, parent_group_id)
 * row into the CcnetGroup* at @data. Returns FALSE: only one row is read. */
static gboolean
get_ccnetgroup_cb (CcnetDBRow *row, void *data)
{
    CcnetGroup **p_group = data;
    int group_id;
    const char *group_name;
    const char *creator;
    int parent_group_id;
    gint64 ts;

    group_id = seaf_db_row_get_column_int (row, 0);
    group_name = (const char *)seaf_db_row_get_column_text (row, 1);
    creator = (const char *)seaf_db_row_get_column_text (row, 2);
    ts = seaf_db_row_get_column_int64 (row, 3);
    parent_group_id = seaf_db_row_get_column_int (row, 4);

    /* NOTE(review): assumes @creator is never NULL in the DB —
     * g_ascii_strdown(NULL, ...) would crash; verify the schema forbids
     * NULL creator_name. */
    char *creator_l = g_ascii_strdown (creator, -1);
    *p_group = g_object_new (CCNET_TYPE_GROUP,
                             "id", group_id,
                             "group_name", group_name,
                             "creator_name", creator_l,
                             "timestamp", ts,
                             "source", "DB",
                             "parent_group_id", parent_group_id,
                             NULL);
    g_free (creator_l);

    return FALSE;
}
/* Return the direct children of @group_id (rows whose parent_group_id
 * matches); NULL on error or when there are none. */
GList *
ccnet_group_manager_get_child_groups (CcnetGroupManager *mgr, int group_id,
                                      GError **error)
{
    CcnetDB *db = mgr->priv->db;
    const char *table_name = mgr->priv->table_name;
    GList *children = NULL;
    GString *sql = g_string_new ("");

    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)
        g_string_printf (sql,
                         "SELECT group_id, group_name, creator_name, timestamp, parent_group_id FROM "
                         "\"%s\" WHERE parent_group_id=?", table_name);
    else
        g_string_printf (sql,
                         "SELECT group_id, group_name, creator_name, timestamp, parent_group_id FROM "
                         "`%s` WHERE parent_group_id=?", table_name);

    int rc = seaf_db_statement_foreach_row (db, sql->str,
                                            get_user_groups_cb, &children,
                                            1, "int", group_id);
    g_string_free (sql, TRUE);

    if (rc < 0)
        return NULL;
    return children;
}
/* Return @group_id and all of its descendants: any group whose
 * GroupStructure path starts with "<id>, " or contains ", <id>, ",
 * plus the group itself. NULL on error. */
GList *
ccnet_group_manager_get_descendants_groups(CcnetGroupManager *mgr, int group_id,
                                           GError **error)
{
    GList *ret = NULL;
    CcnetDB *db = mgr->priv->db;
    const char *table_name = mgr->priv->table_name;
    GString *sql = g_string_new("");

    /* The LIKE patterns match @group_id as a prefix or interior element of
     * the comma-separated ancestor path; "%%" is a literal '%' after
     * g_string_printf expansion. */
    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)
        g_string_printf (sql, "SELECT g.group_id, group_name, creator_name, timestamp, "
                         "parent_group_id FROM \"%s\" g, GroupStructure s "
                         "WHERE g.group_id=s.group_id "
                         "AND (s.path LIKE '%d, %%' OR s.path LIKE '%%, %d, %%' "
                         "OR g.group_id=?)",
                         table_name, group_id, group_id);
    else
        g_string_printf (sql, "SELECT g.group_id, group_name, creator_name, timestamp, "
                         "parent_group_id FROM `%s` g, GroupStructure s "
                         "WHERE g.group_id=s.group_id "
                         "AND (s.path LIKE '%d, %%' OR s.path LIKE '%%, %d, %%' "
                         "OR g.group_id=?)",
                         table_name, group_id, group_id);
    if (seaf_db_statement_foreach_row (db, sql->str,
                                       get_user_groups_cb, &ret,
                                       1, "int", group_id) < 0) {
        g_string_free (sql, TRUE);
        return NULL;
    }

    g_string_free (sql, TRUE);
    return ret;
}
/* Load a single group by id. Returns a new CcnetGroup (caller unrefs),
 * or NULL on error or when no such group exists. */
CcnetGroup *
ccnet_group_manager_get_group (CcnetGroupManager *mgr, int group_id,
                               GError **error)
{
    CcnetDB *db = mgr->priv->db;
    const char *table_name = mgr->priv->table_name;
    CcnetGroup *ccnetgroup = NULL;
    GString *sql = g_string_new ("");

    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)
        g_string_printf (sql,
                         "SELECT group_id, group_name, creator_name, timestamp, parent_group_id FROM "
                         "\"%s\" WHERE group_id = ?", table_name);
    else
        g_string_printf (sql,
                         "SELECT group_id, group_name, creator_name, timestamp, parent_group_id FROM "
                         "`%s` WHERE group_id = ?", table_name);

    int rc = seaf_db_statement_foreach_row (db, sql->str,
                                            get_ccnetgroup_cb, &ccnetgroup,
                                            1, "int", group_id);
    g_string_free (sql, TRUE);

    if (rc < 0)
        return NULL;
    return ccnetgroup;
}
static gboolean
get_ccnet_groupuser_cb (CcnetDBRow *row, void *data)
{
    /* Per-row callback: build a CcnetGroupUser from the columns
     * (group_id, user_name, is_staff) and prepend it to the result list.
     * Always returns TRUE so iteration continues. */
    GList **plist = data;
    CcnetGroupUser *group_user;
    int group_id = seaf_db_row_get_column_int (row, 0);
    const char *user = (const char *)seaf_db_row_get_column_text (row, 1);
    int is_staff = seaf_db_row_get_column_int (row, 2);

    /* Fix: a NULL user_name column would crash g_ascii_strdown();
     * skip such rows instead of dereferencing NULL. */
    if (!user)
        return TRUE;

    /* Store the lower-cased form of the user name. */
    char *user_l = g_ascii_strdown (user, -1);
    group_user = g_object_new (CCNET_TYPE_GROUP_USER,
                               "group_id", group_id,
                               "user_name", user_l,
                               "is_staff", is_staff,
                               NULL);
    g_free (user_l);
    if (group_user != NULL) {
        *plist = g_list_prepend (*plist, group_user);
    }
    return TRUE;
}
GList *
ccnet_group_manager_get_group_members (CcnetGroupManager *mgr,
                                       int group_id,
                                       int start,
                                       int limit,
                                       GError **error)
{
    /* Fetch members of @group_id. @limit == -1 means "no paging".
     * The callback prepends, so the list is reversed before returning
     * to restore row order. Returns NULL on DB error. */
    CcnetDB *db = mgr->priv->db;
    GList *members = NULL;
    int rc;

    if (limit != -1) {
        const char *sql = "SELECT group_id, user_name, is_staff FROM GroupUser "
                          "WHERE group_id = ? LIMIT ? OFFSET ?";
        rc = seaf_db_statement_foreach_row (db, sql,
                                            get_ccnet_groupuser_cb, &members,
                                            3, "int", group_id,
                                               "int", limit,
                                               "int", start);
    } else {
        const char *sql = "SELECT group_id, user_name, is_staff FROM GroupUser "
                          "WHERE group_id = ?";
        rc = seaf_db_statement_foreach_row (db, sql,
                                            get_ccnet_groupuser_cb, &members,
                                            1, "int", group_id);
    }

    if (rc < 0)
        return NULL;
    return g_list_reverse (members);
}
GList *
ccnet_group_manager_get_members_with_prefix (CcnetGroupManager *mgr,
                                             int group_id,
                                             const char *prefix,
                                             GError **error)
{
    /* Collect the members of @group_id and of all its descendant groups,
     * optionally restricted to user names starting with @prefix.
     * Returns NULL on DB error. */
    CcnetDB *db = mgr->priv->db;
    GList *group_users = NULL;
    GList *ptr;
    CcnetGroup *group;
    GString *sql = g_string_new ("");
    int id;
    int rc;

    g_string_printf (sql, "SELECT group_id, user_name, is_staff FROM GroupUser "
                          "WHERE group_id IN (");

    /* Group ids are ints, so embedding them in the IN-list is safe. */
    GList *groups = ccnet_group_manager_get_descendants_groups (mgr, group_id, NULL);
    if (!groups)
        g_string_append_printf (sql, "%d", group_id);
    for (ptr = groups; ptr; ptr = ptr->next) {
        group = ptr->data;
        g_object_get (group, "id", &id, NULL);
        g_string_append_printf (sql, "%d", id);
        if (ptr->next)
            g_string_append (sql, ", ");
    }
    g_string_append (sql, ")");
    g_list_free_full (groups, g_object_unref);

    if (prefix) {
        /* Fix: bind the prefix as a statement parameter instead of
         * splicing it into the SQL text — the old
         * "AND user_name LIKE '%s%%'" was vulnerable to SQL injection
         * through a crafted prefix. */
        g_string_append (sql, " AND user_name LIKE ?");
        char *patt = g_strdup_printf ("%s%%", prefix);
        rc = seaf_db_statement_foreach_row (db, sql->str,
                                            get_ccnet_groupuser_cb, &group_users,
                                            1, "string", patt);
        g_free (patt);
    } else {
        rc = seaf_db_statement_foreach_row (db, sql->str,
                                            get_ccnet_groupuser_cb, &group_users, 0);
    }

    g_string_free (sql, TRUE);
    if (rc < 0)
        return NULL;
    return group_users;
}
int
ccnet_group_manager_check_group_staff (CcnetGroupManager *mgr,
                                       int group_id,
                                       const char *user_name,
                                       gboolean in_structure)
{
    /* Thin wrapper: delegate the staff check to the shared helper,
     * passing along the manager's DB connection. */
    CcnetDB *db = mgr->priv->db;
    return check_group_staff (db, group_id, user_name, in_structure);
}
int
ccnet_group_manager_remove_group_user (CcnetGroupManager *mgr,
                                       const char *user)
{
    /* Remove @user from every group.
     * Returns 0 on success, -1 on DB error. */
    CcnetDB *db = mgr->priv->db;
    int rc;

    rc = seaf_db_statement_query (db,
                                  "DELETE FROM GroupUser "
                                  "WHERE user_name = ?",
                                  1, "string", user);
    /* Fix: the old code ignored the query result and always reported
     * success; propagate DB failures the same way
     * ccnet_group_manager_update_group_user() does. */
    if (rc < 0)
        return -1;
    return 0;
}
int
ccnet_group_manager_is_group_user (CcnetGroupManager *mgr,
                                   int group_id,
                                   const char *user,
                                   gboolean in_structure)
{
    /* Return 1 if @user belongs to @group_id, 0 otherwise (including on
     * DB error). When @in_structure is TRUE, membership in an ancestor
     * group also counts. */
    CcnetDB *db = mgr->priv->db;
    gboolean err = FALSE;
    gboolean found;

    found = seaf_db_statement_exists (db, "SELECT group_id FROM GroupUser "
                                      "WHERE group_id=? AND user_name=?", &err,
                                      2, "int", group_id, "string", user);
    if (err) {
        ccnet_warning ("DB error when check user exist in GroupUser.\n");
        return 0;
    }
    if (found)
        return 1;
    if (!in_structure)
        return 0;

    /* Not a direct member: walk the groups the user belongs to (with
     * ancestors included) and look for @group_id among them. */
    GList *groups = ccnet_group_manager_get_groups_by_user (mgr, user, TRUE, NULL);
    GList *ptr;
    int result = 0;

    for (ptr = groups; ptr; ptr = ptr->next) {
        int id;
        g_object_get ((CcnetGroup *)ptr->data, "id", &id, NULL);
        if (id == group_id) {
            result = 1;
            break;
        }
    }
    g_list_free_full (groups, g_object_unref);
    return result;
}
static gboolean
get_all_ccnetgroups_cb (CcnetDBRow *row, void *data)
{
    /* Per-row callback: turn a (group_id, group_name, creator_name,
     * timestamp, parent_group_id) row into a CcnetGroup and prepend it to
     * the result list. Always returns TRUE to continue iteration. */
    GList **plist = data;

    int group_id = seaf_db_row_get_column_int (row, 0);
    const char *group_name = (const char *)seaf_db_row_get_column_text (row, 1);
    const char *creator = (const char *)seaf_db_row_get_column_text (row, 2);
    gint64 ts = seaf_db_row_get_column_int64 (row, 3);
    int parent_group_id = seaf_db_row_get_column_int (row, 4);

    /* Creator e-mails are exposed lower-cased. */
    char *creator_l = g_ascii_strdown (creator, -1);
    CcnetGroup *group = g_object_new (CCNET_TYPE_GROUP,
                                      "id", group_id,
                                      "group_name", group_name,
                                      "creator_name", creator_l,
                                      "timestamp", ts,
                                      "source", "DB",
                                      "parent_group_id", parent_group_id,
                                      NULL);
    g_free (creator_l);

    *plist = g_list_prepend (*plist, group);
    return TRUE;
}
GList *
ccnet_group_manager_get_top_groups (CcnetGroupManager *mgr,
                                    gboolean including_org,
                                    GError **error)
{
    /* List root-level groups (parent_group_id == -1). The SQL orders
     * newest-first; the prepend-then-reverse callback pattern means the
     * returned list is in SQL order. When @including_org is FALSE, groups
     * referenced by OrgGroup are filtered out. Returns NULL on DB error. */
    CcnetDB *db = mgr->priv->db;
    GList *ret = NULL;
    GString *sql = g_string_new (NULL);
    const char *table_name = mgr->priv->table_name;
    gboolean pgsql = (seaf_db_type (db) == SEAF_DB_TYPE_PGSQL);
    int rc;

    if (including_org) {
        const char *fmt = pgsql
            ? "SELECT group_id, group_name, "
              "creator_name, timestamp, parent_group_id FROM \"%s\" "
              "WHERE parent_group_id=-1 ORDER BY timestamp DESC"
            : "SELECT group_id, group_name, "
              "creator_name, timestamp, parent_group_id FROM `%s` "
              "WHERE parent_group_id=-1 ORDER BY timestamp DESC";
        g_string_printf (sql, fmt, table_name);
    } else {
        const char *fmt = pgsql
            ? "SELECT g.group_id, g.group_name, "
              "g.creator_name, g.timestamp, g.parent_group_id FROM \"%s\" g "
              "LEFT JOIN OrgGroup o ON g.group_id = o.group_id "
              "WHERE g.parent_group_id=-1 AND o.group_id is NULL "
              "ORDER BY timestamp DESC"
            : "SELECT g.group_id, g.group_name, "
              "g.creator_name, g.timestamp, g.parent_group_id FROM `%s` g "
              "LEFT JOIN OrgGroup o ON g.group_id = o.group_id "
              "WHERE g.parent_group_id=-1 AND o.group_id is NULL "
              "ORDER BY timestamp DESC";
        g_string_printf (sql, fmt, table_name);
    }

    rc = seaf_db_statement_foreach_row (db, sql->str,
                                        get_all_ccnetgroups_cb, &ret, 0);
    g_string_free (sql, TRUE);
    if (rc < 0)
        return NULL;
    return g_list_reverse (ret);
}
GList*
ccnet_group_manager_list_all_departments (CcnetGroupManager *mgr,
                                          GError **error)
{
    /* List every department group: top-level departments have
     * parent_group_id == -1, sub-departments have a positive parent.
     * Returns NULL on DB error. */
    CcnetDB *db = mgr->priv->db;
    GList *ret = NULL;
    GString *sql = g_string_new ("");
    const char *table_name = mgr->priv->table_name;
    int rc;
    int db_type = seaf_db_type(db);

    /* Fix: the previous query also selected `type` between `timestamp`
     * and `parent_group_id`, shifting `parent_group_id` to column 5 while
     * get_all_ccnetgroups_cb() reads it from column 4 — every department
     * was reported with the `type` value as its parent id. Keep the
     * column order the callback expects; `type` was unused. */
    if (db_type == SEAF_DB_TYPE_PGSQL) {
        g_string_printf (sql, "SELECT group_id, group_name, "
                         "creator_name, timestamp, "
                         "parent_group_id FROM \"%s\" "
                         "WHERE parent_group_id = -1 OR parent_group_id > 0 "
                         "ORDER BY group_id", table_name);
    } else {
        g_string_printf (sql, "SELECT `group_id`, `group_name`, "
                         "`creator_name`, `timestamp`, `parent_group_id` FROM `%s` "
                         "WHERE parent_group_id = -1 OR parent_group_id > 0 "
                         "ORDER BY group_id", table_name);
    }
    rc = seaf_db_statement_foreach_row (db, sql->str,
                                        get_all_ccnetgroups_cb, &ret, 0);
    g_string_free (sql, TRUE);
    if (rc < 0)
        return NULL;
    return g_list_reverse (ret);
}
GList*
ccnet_group_manager_get_all_groups (CcnetGroupManager *mgr,
                                    int start, int limit, GError **error)
{
    /* List all groups, ordered by timestamp DESC in the query (the
     * prepend-then-reverse callback pattern preserves that order in the
     * returned list). start == -1 && limit == -1 disables paging.
     * Returns NULL on DB error. */
    CcnetDB *db = mgr->priv->db;
    GList *ret = NULL;
    GString *sql = g_string_new (NULL);
    const char *table_name = mgr->priv->table_name;
    gboolean pgsql = (seaf_db_type (db) == SEAF_DB_TYPE_PGSQL);
    gboolean paged = !(start == -1 && limit == -1);
    int rc;

    if (pgsql)
        g_string_printf (sql, "SELECT group_id, group_name, "
                         "creator_name, timestamp, parent_group_id FROM \"%s\" "
                         "ORDER BY timestamp DESC", table_name);
    else
        g_string_printf (sql, "SELECT `group_id`, `group_name`, "
                         "`creator_name`, `timestamp`, `parent_group_id` FROM `%s` "
                         "ORDER BY timestamp DESC", table_name);

    if (paged) {
        g_string_append (sql, " LIMIT ? OFFSET ?");
        rc = seaf_db_statement_foreach_row (db, sql->str,
                                            get_all_ccnetgroups_cb, &ret,
                                            2, "int", limit, "int", start);
    } else {
        rc = seaf_db_statement_foreach_row (db, sql->str,
                                            get_all_ccnetgroups_cb, &ret, 0);
    }

    g_string_free (sql, TRUE);
    if (rc < 0)
        return NULL;
    return g_list_reverse (ret);
}
int
ccnet_group_manager_set_group_creator (CcnetGroupManager *mgr,
                                       int group_id,
                                       const char *user_name)
{
    /* Change the recorded creator of @group_id to @user_name.
     * Returns 0 on success, -1 on DB error. */
    CcnetDB *db = mgr->priv->db;
    const char *table_name = mgr->priv->table_name;
    GString *sql = g_string_new ("");
    int rc;

    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) {
        g_string_printf (sql, "UPDATE \"%s\" SET creator_name = ? WHERE group_id = ?",
                         table_name);
    } else {
        g_string_printf (sql, "UPDATE `%s` SET creator_name = ? WHERE group_id = ?",
                         table_name);
    }
    /* Fix: the old code ignored the query result and always returned 0. */
    rc = seaf_db_statement_query (db, sql->str, 2, "string", user_name, "int", group_id);
    g_string_free (sql, TRUE);
    return rc < 0 ? -1 : 0;
}
GList *
ccnet_group_manager_search_groups (CcnetGroupManager *mgr,
                                   const char *keyword,
                                   int start, int limit)
{
    /* Find groups whose name contains @keyword (bound as a LIKE
     * parameter). start == -1 && limit == -1 disables paging. On DB
     * error any partial results are released and NULL is returned. */
    CcnetDB *db = mgr->priv->db;
    GList *ret = NULL;
    GString *sql = g_string_new (NULL);
    const char *table_name = mgr->priv->table_name;
    gboolean paged = !(start == -1 && limit == -1);
    int rc;
    char *db_patt = g_strdup_printf ("%%%s%%", keyword);

    if (seaf_db_type (db) == SEAF_DB_TYPE_PGSQL)
        g_string_printf (sql,
                         "SELECT group_id, group_name, "
                         "creator_name, timestamp, parent_group_id "
                         "FROM \"%s\" WHERE group_name LIKE ?", table_name);
    else
        g_string_printf (sql,
                         "SELECT group_id, group_name, "
                         "creator_name, timestamp, parent_group_id "
                         "FROM `%s` WHERE group_name LIKE ?", table_name);

    if (paged) {
        g_string_append (sql, " LIMIT ? OFFSET ?");
        rc = seaf_db_statement_foreach_row (db, sql->str,
                                            get_all_ccnetgroups_cb, &ret,
                                            3, "string", db_patt,
                                               "int", limit, "int", start);
    } else {
        rc = seaf_db_statement_foreach_row (db, sql->str,
                                            get_all_ccnetgroups_cb, &ret,
                                            1, "string", db_patt);
    }

    g_free (db_patt);
    g_string_free (sql, TRUE);

    if (rc < 0) {
        g_list_free_full (ret, g_object_unref);
        return NULL;
    }
    return g_list_reverse (ret);
}
static gboolean
get_groups_members_cb (CcnetDBRow *row, void *data)
{
    /* Per-row callback: wrap the user_name column (lower-cased) in a
     * CcnetGroupUser and append it, preserving the row order of the
     * query. Always returns TRUE to continue iteration. */
    GList **users = data;
    const char *user = seaf_db_row_get_column_text (row, 0);

    char *lowered = g_ascii_strdown (user, -1);
    CcnetGroupUser *member = g_object_new (CCNET_TYPE_GROUP_USER,
                                           "user_name", lowered,
                                           NULL);
    g_free (lowered);

    *users = g_list_append (*users, member);
    return TRUE;
}
/* group_ids is json format: "[id1, id2, id3, ...]" */
GList *
ccnet_group_manager_get_groups_members (CcnetGroupManager *mgr, const char *group_ids,
                                        GError **error)
{
    /* Return the distinct members of every group listed in @group_ids.
     * Sets @error and returns NULL on malformed input. */
    CcnetDB *db = mgr->priv->db;
    GList *ret = NULL;
    GString *sql = g_string_new ("");
    size_t i, id_num;
    int group_id;
    json_t *j_array = NULL, *j_obj;
    json_error_t j_error;

    g_string_printf (sql, "SELECT DISTINCT user_name FROM GroupUser WHERE group_id IN (");

    j_array = json_loadb (group_ids, strlen(group_ids), 0, &j_error);
    if (!j_array) {
        g_set_error (error, CCNET_DOMAIN, 0, "Bad args.");
        g_string_free (sql, TRUE);
        return NULL;
    }

    id_num = json_array_size (j_array);
    /* Fix: an empty id list previously produced the malformed SQL
     * "... IN ()" and only failed (with a warning) at query time;
     * reject it up front like other bad arguments. */
    if (id_num == 0) {
        g_set_error (error, CCNET_DOMAIN, 0, "Bad args.");
        g_string_free (sql, TRUE);
        json_decref (j_array);
        return NULL;
    }

    /* Loop index is size_t now, matching json_array_size(). */
    for (i = 0; i < id_num; i++) {
        j_obj = json_array_get (j_array, i);
        group_id = json_integer_value (j_obj);
        if (group_id <= 0) {
            g_set_error (error, CCNET_DOMAIN, 0, "Bad args.");
            g_string_free (sql, TRUE);
            json_decref (j_array);
            return NULL;
        }
        /* Ids are validated positive integers, so embedding them in the
         * IN-list is injection-safe. */
        g_string_append_printf (sql, "%d", group_id);
        if (i + 1 < id_num)
            g_string_append (sql, ",");
    }
    g_string_append (sql, ")");
    json_decref (j_array);

    if (seaf_db_statement_foreach_row (db, sql->str, get_groups_members_cb, &ret, 0) < 0)
        ccnet_warning("Failed to get groups members for group [%s].\n", group_ids);

    g_string_free (sql, TRUE);
    return ret;
}
GList*
ccnet_group_manager_search_group_members (CcnetGroupManager *mgr,
                                          int group_id,
                                          const char *pattern)
{
    /* Return members of @group_id whose user_name contains @pattern,
     * in ascending user_name order. Returns NULL on DB error. */
    CcnetDB *db = mgr->priv->db;
    GList *ret = NULL;
    char *sql;
    int rc;
    char *db_patt = g_strdup_printf ("%%%s%%", pattern);

    sql = "SELECT DISTINCT user_name FROM GroupUser "
          "WHERE group_id = ? AND user_name LIKE ? ORDER BY user_name";
    rc = seaf_db_statement_foreach_row (db, sql,
                                        get_groups_members_cb, &ret,
                                        2, "int", group_id, "string", db_patt);
    g_free (db_patt);
    if (rc < 0) {
        g_list_free_full (ret, g_object_unref);
        return NULL;
    }
    /* Fix: get_groups_members_cb() builds the list with g_list_append(),
     * so it is already in the query's "ORDER BY user_name" (ascending)
     * order; the old g_list_reverse() here flipped it to descending. */
    return ret;
}
int
ccnet_group_manager_update_group_user (CcnetGroupManager *mgr,
                                       const char *old_email,
                                       const char *new_email)
{
    /* Rename a user across all of their group memberships.
     * Returns 0 on success, -1 on DB error. */
    CcnetDB *db = mgr->priv->db;
    int rc = seaf_db_statement_query (db,
                                      "UPDATE GroupUser SET user_name=? "
                                      "WHERE user_name = ?",
                                      2, "string", new_email, "string", old_email);
    return rc < 0 ? -1 : 0;
}
================================================
FILE: common/group-mgr.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

#ifndef GROUP_MGR_H
#define GROUP_MGR_H

/* Public interface of the group/department manager. Groups live in a DB
 * table whose name is held in the private struct; departments are groups
 * linked into a tree through parent_group_id (top level uses -1). */

/* #define MAX_GROUP_MEMBERS 16 */

typedef struct _SeafileSession SeafileSession;
typedef struct _CcnetGroupManager CcnetGroupManager;
typedef struct _CcnetGroupManagerPriv CcnetGroupManagerPriv;

struct _CcnetGroupManager
{
    SeafileSession *session;        /* owning session */
    CcnetGroupManagerPriv *priv;    /* DB handle, group table name, ... */
};

/* Allocate a manager bound to @session. */
CcnetGroupManager* ccnet_group_manager_new (SeafileSession *session);

int
ccnet_group_manager_prepare (CcnetGroupManager *manager);

void ccnet_group_manager_start (CcnetGroupManager *manager);

/* Create a group named @group_name owned by @user_name.
 * @parent_group_id attaches it to a department tree (-1 = top level). */
int ccnet_group_manager_create_group (CcnetGroupManager *mgr,
                                      const char *group_name,
                                      const char *user_name,
                                      int parent_group_id,
                                      GError **error);

/* Same as create_group, but the group belongs to organization @org_id. */
int ccnet_group_manager_create_org_group (CcnetGroupManager *mgr,
                                          int org_id,
                                          const char *group_name,
                                          const char *user_name,
                                          int parent_group_id,
                                          GError **error);

int ccnet_group_manager_remove_group (CcnetGroupManager *mgr,
                                      int group_id,
                                      gboolean remove_anyway,
                                      GError **error);

/* Membership management: @user_name is the acting user, @member_name the
 * affected member. */
int ccnet_group_manager_add_member (CcnetGroupManager *mgr,
                                    int group_id,
                                    const char *user_name,
                                    const char *member_name,
                                    GError **error);

int ccnet_group_manager_remove_member (CcnetGroupManager *mgr,
                                       int group_id,
                                       const char *user_name,
                                       const char *member_name,
                                       GError **error);

int ccnet_group_manager_set_admin (CcnetGroupManager *mgr,
                                   int group_id,
                                   const char *member_name,
                                   GError **error);

int ccnet_group_manager_unset_admin (CcnetGroupManager *mgr,
                                     int group_id,
                                     const char *member_name,
                                     GError **error);

int ccnet_group_manager_set_group_name (CcnetGroupManager *mgr,
                                        int group_id,
                                        const char *group_name,
                                        GError **error);

int ccnet_group_manager_quit_group (CcnetGroupManager *mgr,
                                    int group_id,
                                    const char *user_name,
                                    GError **error);

/* Groups @user_name belongs to; with @return_ancestors, parent
 * departments of those groups are included as well. */
GList *
ccnet_group_manager_get_groups_by_user (CcnetGroupManager *mgr,
                                        const char *user_name,
                                        gboolean return_ancestors,
                                        GError **error);

/* Look up one group by id; returns NULL when not found or on DB error. */
CcnetGroup *
ccnet_group_manager_get_group (CcnetGroupManager *mgr, int group_id,
                               GError **error);

/* Members of @group_id; limit == -1 disables paging. */
GList *
ccnet_group_manager_get_group_members (CcnetGroupManager *mgr,
                                       int group_id,
                                       int start,
                                       int limit,
                                       GError **error);

/* Members of @group_id and all its descendants whose name starts with
 * @prefix (NULL prefix = all members). */
GList *
ccnet_group_manager_get_members_with_prefix (CcnetGroupManager *mgr,
                                             int group_id,
                                             const char *prefix,
                                             GError **error);

/* NOTE(review): declared with `int in_structure` here but defined with
 * `gboolean` in group-mgr.c — compatible since gboolean is an int
 * typedef, but the spellings should be unified. */
int
ccnet_group_manager_check_group_staff (CcnetGroupManager *mgr,
                                       int group_id,
                                       const char *user_name,
                                       int in_structure);

/* Remove @user from every group. */
int
ccnet_group_manager_remove_group_user (CcnetGroupManager *mgr,
                                       const char *user);

/* 1 if @user is in @group_id (with ancestor lookup when @in_structure),
 * else 0. */
int
ccnet_group_manager_is_group_user (CcnetGroupManager *mgr,
                                   int group_id,
                                   const char *user,
                                   gboolean in_structure);

GList*
ccnet_group_manager_list_all_departments (CcnetGroupManager *mgr,
                                          GError **error);

/* All groups; start == -1 && limit == -1 disables paging. */
GList*
ccnet_group_manager_get_all_groups (CcnetGroupManager *mgr,
                                    int start, int limit, GError **error);

int
ccnet_group_manager_set_group_creator (CcnetGroupManager *mgr,
                                       int group_id,
                                       const char *user_name);

/* Groups whose name contains @keyword. */
GList*
ccnet_group_manager_search_groups (CcnetGroupManager *mgr,
                                   const char *keyword,
                                   int start, int limit);

/* Members of @group_id whose name matches @pattern (substring). */
GList*
ccnet_group_manager_search_group_members (CcnetGroupManager *mgr,
                                          int group_id,
                                          const char *pattern);

/* Root-level groups (parent_group_id == -1). */
GList *
ccnet_group_manager_get_top_groups (CcnetGroupManager *mgr, gboolean including_org, GError **error);

GList *
ccnet_group_manager_get_child_groups (CcnetGroupManager *mgr, int group_id,
                                      GError **error);

GList *
ccnet_group_manager_get_descendants_groups (CcnetGroupManager *mgr, int group_id,
                                            GError **error);

GList *
ccnet_group_manager_get_ancestor_groups (CcnetGroupManager *mgr, int group_id);

/* @group_ids is a JSON array of ids: "[id1, id2, ...]". */
GList *
ccnet_group_manager_get_groups_members (CcnetGroupManager *mgr, const char *group_ids,
                                        GError **error);

/* Rename a user in all GroupUser rows. */
int
ccnet_group_manager_update_group_user (CcnetGroupManager *mgr,
                                       const char *old_email,
                                       const char *new_email);

#endif /* GROUP_MGR_H */
================================================
FILE: common/log.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#include "common.h"
#include <stdio.h>
#include <stdarg.h>
#ifndef WIN32
#ifdef SEAFILE_SERVER
#include <syslog.h>
#endif
#endif
#include "log.h"
#include "utils.h"
/* message with greater log levels will be ignored */
static int ccnet_log_level;     /* verbosity threshold for the "Ccnet" log domain */
static int seafile_log_level;   /* verbosity threshold for the default log domain */
static char *logfile;           /* expanded log file path, or "-" for stdout */
static FILE *logfp;             /* open log stream (may be stdout) */
static gboolean log_to_stdout = FALSE;  /* set via SEAFILE_LOG_TO_STDOUT=true */
static char *app_name;          /* prefix used when logging to stdout */
#ifndef WIN32
#ifdef SEAFILE_SERVER
static gboolean enable_syslog;  /* mirror messages to syslog; see set_syslog_config() */
#endif
#endif
#ifndef WIN32
#ifdef SEAFILE_SERVER
static int
get_syslog_level (GLogLevelFlags level)
{
    /* Map a GLib log level onto the closest syslog priority.
     * Anything unrecognized is treated as debug. */
    if (level == G_LOG_LEVEL_INFO)
        return LOG_INFO;
    if (level == G_LOG_LEVEL_WARNING)
        return LOG_WARNING;
    if (level == G_LOG_LEVEL_ERROR || level == G_LOG_LEVEL_CRITICAL)
        return LOG_ERR;
    /* G_LOG_LEVEL_DEBUG and everything else. */
    return LOG_DEBUG;
}
#endif
#endif
static void
seafile_log (const gchar *log_domain, GLogLevelFlags log_level,
             const gchar *message, gpointer user_data)
{
    /* GLib log handler for the default domain: write
     * "[app] [YYYY-MM-DD HH:MM:SS] [LEVEL] message" to the log stream
     * (and to syslog when enabled). Messages above the configured
     * verbosity are dropped. */
    char stamp[1024];
    int len;

    if (log_level > seafile_log_level)
        return;

    /* When several processes share stdout, prefix each line with the
     * application name so they can be told apart. */
    if (log_to_stdout) {
        char name_buf[32] = {0};
        snprintf (name_buf, sizeof(name_buf), "[%s] ", app_name);
        fputs (name_buf, logfp);
    }

    time_t now = time (NULL);
    struct tm *tm = localtime (&now);
    len = strftime (stamp, sizeof(stamp), "[%Y-%m-%d %H:%M:%S] ", tm);
    g_return_if_fail (len < 1024);

    if (logfp) {
        const char *tag;
        if (log_level == G_LOG_LEVEL_DEBUG)
            tag = "[DEBUG] ";
        else if (log_level == G_LOG_LEVEL_WARNING)
            tag = "[WARNING] ";
        else if (log_level == G_LOG_LEVEL_CRITICAL)
            tag = "[ERROR] ";
        else
            tag = "[INFO] ";
        fputs (stamp, logfp);
        fputs (tag, logfp);
        fputs (message, logfp);
        fflush (logfp);
    }

#ifndef WIN32
#ifdef SEAFILE_SERVER
    if (enable_syslog)
        syslog (get_syslog_level (log_level), "%s", message);
#endif
#endif
}
static void
ccnet_log (const gchar *log_domain, GLogLevelFlags log_level,
           const gchar *message, gpointer user_data)
{
    /* GLib log handler for the "Ccnet" domain. Same shape as
     * seafile_log() but with a locale-dependent "[%x %X]" timestamp and
     * no stdout app-name prefix. */
    char stamp[1024];
    int len;

    if (log_level > ccnet_log_level)
        return;

    time_t now = time (NULL);
    struct tm *tm = localtime (&now);
    len = strftime (stamp, sizeof(stamp), "[%x %X] ", tm);
    g_return_if_fail (len < 1024);

    if (logfp) {
        const char *tag;
        if (log_level == G_LOG_LEVEL_DEBUG)
            tag = "[DEBUG] ";
        else if (log_level == G_LOG_LEVEL_WARNING)
            tag = "[WARNING] ";
        else if (log_level == G_LOG_LEVEL_CRITICAL)
            tag = "[ERROR] ";
        else
            tag = "[INFO] ";
        fputs (stamp, logfp);
        fputs (tag, logfp);
        fputs (message, logfp);
        fflush (logfp);
    }

#ifndef WIN32
#ifdef SEAFILE_SERVER
    if (enable_syslog)
        syslog (get_syslog_level (log_level), "%s", message);
#endif
#endif
}
static int
get_debug_level (const char *str, int default_level)
{
    /* Translate a verbosity name ("debug"/"info"/"warning") into the
     * matching GLib log level; unknown names fall back to @default_level. */
    static const struct {
        const char *name;
        int level;
    } levels[] = {
        { "debug",   G_LOG_LEVEL_DEBUG },
        { "info",    G_LOG_LEVEL_INFO },
        { "warning", G_LOG_LEVEL_WARNING },
    };
    size_t i;

    for (i = 0; i < G_N_ELEMENTS (levels); i++) {
        if (strcmp (str, levels[i].name) == 0)
            return levels[i].level;
    }
    return default_level;
}
int
seafile_log_init (const char *_logfile, const char *ccnet_debug_level_str,
const char *seafile_debug_level_str, const char *_app_name)
{
g_log_set_handler (NULL, G_LOG_LEVEL_MASK | G_LOG_FLAG_FATAL
| G_LOG_FLAG_RECURSION, seafile_log, NULL);
g_log_set_handler ("Ccnet", G_LOG_LEVEL_MASK | G_LOG_FLAG_FATAL
| G_LOG_FLAG_RECURSION, ccnet_log, NULL);
/* record all log message */
ccnet_log_level = get_debug_level(ccnet_debug_level_str, G_LOG_LEVEL_INFO);
seafile_log_level = get_debug_level(seafile_debug_level_str, G_LOG_LEVEL_DEBUG);
app_name = g_strdup (_app_name);
const char *log_to_stdout_env = g_getenv("SEAFILE_LOG_TO_STDOUT");
if (g_strcmp0(log_to_stdout_env, "true") == 0) {
logfp = stdout;
logfile = g_strdup (_logfile);
log_to_stdout = TRUE;
} else if (g_strcmp0(_logfile, "-") == 0) {
logfp = stdout;
logfile = g_strdup (_logfile);
} else {
logfile = ccnet_expand_path(_logfile);
if ((logfp = g_fopen (logfile, "a+")) == NULL) {
seaf_message ("Failed to open file %s\n", logfile);
return -1;
}
}
return 0;
}
int
seafile_log_reopen ()
{
    /* Re-open the log file (e.g. after external log rotation). A no-op
     * when logging to stdout. Returns 0 on success, -1 on failure. */
    FILE *newfp;
    FILE *oldfp;

    if (log_to_stdout || g_strcmp0 (logfile, "-") == 0)
        return 0;

    newfp = g_fopen (logfile, "a+");
    if (newfp == NULL) {
        seaf_message ("Failed to open file %s\n", logfile);
        return -1;
    }

    //TODO: check file's health

    /* Install the new stream first, then close the old one. */
    oldfp = logfp;
    logfp = newfp;
    if (fclose (oldfp) < 0) {
        seaf_message ("Failed to close file %s\n", logfile);
        return -1;
    }
    return 0;
}
/* Bitmask of currently enabled debug categories. */
static SeafileDebugFlags debug_flags = 0;

/* Name -> flag table consumed by g_parse_debug_string(), so strings like
 * "Sync:Http" (or "all") can enable categories by name. */
static GDebugKey debug_keys[] = {
{ "Transfer", SEAFILE_DEBUG_TRANSFER },
{ "Sync", SEAFILE_DEBUG_SYNC },
{ "Watch", SEAFILE_DEBUG_WATCH },
{ "Http", SEAFILE_DEBUG_HTTP },
{ "Merge", SEAFILE_DEBUG_MERGE },
{ "Other", SEAFILE_DEBUG_OTHER },
};
gboolean
seafile_debug_flag_is_set (SeafileDebugFlags flag)
{
    /* TRUE if any bit of @flag is currently enabled. */
    if (debug_flags & flag)
        return TRUE;
    return FALSE;
}
void
seafile_debug_set_flags (SeafileDebugFlags flags)
{
    /* Enable @flags in addition to whatever is already set. */
    g_message ("Set debug flags %#x\n", flags);
    debug_flags = debug_flags | flags;
}
void
seafile_debug_set_flags_string (const gchar *flags_string)
{
    /* Parse a debug-category string (see debug_keys) and enable the
     * corresponding flags. A NULL string is ignored. */
    if (!flags_string)
        return;

    seafile_debug_set_flags (
        g_parse_debug_string (flags_string, debug_keys,
                              G_N_ELEMENTS (debug_keys)));
}
void
seafile_debug_impl (SeafileDebugFlags flag, const gchar *format, ...)
{
    /* Backend for the seaf_debug() macro: emit a debug-level message
     * only when @flag is among the enabled categories. */
    va_list args;

    if (!(flag & debug_flags))
        return;

    va_start (args, format);
    g_logv (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, format, args);
    va_end (args);
}
#ifndef WIN32
#ifdef SEAFILE_SERVER
void
set_syslog_config (GKeyFile *config)
{
    /* Read [general]/enable_syslog from @config and, when enabled, open
     * a syslog connection tagged with the process id. */
    enable_syslog = g_key_file_get_boolean (config, "general",
                                            "enable_syslog", NULL);
    if (!enable_syslog)
        return;
    openlog (NULL, LOG_NDELAY | LOG_PID, LOG_USER);
}
#endif
#endif
================================================
FILE: common/log.h
================================================
#ifndef LOG_H
#define LOG_H

/* GQuark used as the error domain for seafile GErrors. */
#define SEAFILE_DOMAIN g_quark_from_string("seafile")

/* Convenience logging macros: prefix every message with "file(line): "
 * and dispatch through GLib logging (handlers are installed by
 * seafile_log_init() in log.c). */
#ifndef seaf_warning
#define seaf_warning(fmt, ...) g_warning("%s(%d): " fmt, __FILE__, __LINE__, ##__VA_ARGS__)
#endif

#ifndef seaf_message
#define seaf_message(fmt, ...) g_message("%s(%d): " fmt, __FILE__, __LINE__, ##__VA_ARGS__)
#endif

#ifndef seaf_error
#define seaf_error(fmt, ...) g_critical("%s(%d): " fmt, __FILE__, __LINE__, ##__VA_ARGS__)
#endif

/* Install the log handlers and open @logfile ("-" means stdout).
 * Returns 0 on success, -1 if the file cannot be opened. */
int seafile_log_init (const char *logfile, const char *ccnet_debug_level_str,
                      const char *seafile_debug_level_str, const char *_app_name);

/* Re-open the log file (e.g. after rotation). Returns 0/-1. */
int seafile_log_reopen ();

#ifndef WIN32
#ifdef SEAFILE_SERVER
/* Read [general]/enable_syslog from @config and open syslog if enabled. */
void
set_syslog_config (GKeyFile *config);
#endif
#endif

/* Enable debug categories from a string like "Sync:Http" (names defined
 * by debug_keys in log.c). */
void
seafile_debug_set_flags_string (const gchar *flags_string);

/* Bit flags selecting which subsystems emit seaf_debug() output. */
typedef enum
{
    SEAFILE_DEBUG_TRANSFER = 1 << 1,
    SEAFILE_DEBUG_SYNC = 1 << 2,
    SEAFILE_DEBUG_WATCH = 1 << 3, /* wt-monitor */
    SEAFILE_DEBUG_HTTP = 1 << 4, /* http server */
    SEAFILE_DEBUG_MERGE = 1 << 5,
    SEAFILE_DEBUG_OTHER = 1 << 6,
} SeafileDebugFlags;

/* Backend of the seaf_debug() macro: logs only when @flag is enabled. */
void seafile_debug_impl (SeafileDebugFlags flag, const gchar *format, ...);

#ifdef DEBUG_FLAG

#undef seaf_debug
/* Per-file debug macro: the including .c file defines DEBUG_FLAG before
 * including this header to pick its category. */
#define seaf_debug(fmt, ...) \
    seafile_debug_impl (DEBUG_FLAG, "%.10s(%d): " fmt, __FILE__, __LINE__, ##__VA_ARGS__)

#endif  /* DEBUG_FLAG */

#endif
================================================
FILE: common/merge-new.c
================================================
#include "common.h"
#include "seafile-session.h"
#include "merge-new.h"
#include "vc-common.h"
#define DEBUG_FLAG SEAFILE_DEBUG_MERGE
#include "log.h"
static int
merge_trees_recursive (const char *store_id, int version,
int n, SeafDir *trees[],
const char *basedir,
MergeOptions *opt);
/* Resolve @modifier (an e-mail address) to a display nickname, caching
 * results in @email_to_nickname so seahub is queried at most once per
 * modifier during a merge. Returns NULL only when @modifier is NULL.
 * The returned string is stored in the cache; presumably the table's
 * value-destroy function frees it (set up by the caller) — callers must
 * not free it themselves. TODO confirm. */
static const char *
get_nickname_by_modifier (GHashTable *email_to_nickname, const char *modifier)
{
    const char *nickname = NULL;

    if (!modifier) {
        return NULL;
    }

    /* Fast path: already resolved earlier in this merge. */
    nickname = g_hash_table_lookup (email_to_nickname, modifier);
    if (nickname) {
        return nickname;
    }

    /* Ask seahub; fall back to the raw e-mail address on failure. */
    nickname = http_tx_manager_get_nickname (modifier);
    if (!nickname) {
        nickname = g_strdup (modifier);
    }

    g_hash_table_insert (email_to_nickname, g_strdup(modifier), nickname);

    return nickname;
}
/* Build a conflict path for @filename, suffixed with who last modified
 * the file on the remote head and when. If the modifier/mtime lookup
 * fails, fall back to the remote head commit's author and the current
 * time. Returns a newly allocated string, or NULL on failure. */
static char *
merge_conflict_filename (const char *store_id, int version,
                         MergeOptions *opt,
                         const char *basedir,
                         const char *filename)
{
    char *path = NULL, *modifier = NULL, *conflict_name = NULL;
    const char *nickname = NULL;
    gint64 mtime;
    SeafCommit *commit;

    path = g_strconcat (basedir, filename, NULL);
    int rc = get_file_modifier_mtime (opt->remote_repo_id,
                                      store_id,
                                      version,
                                      opt->remote_head,
                                      path,
                                      &modifier, &mtime);
    if (rc < 0) {
        /* Fallback: use the remote head commit's author and "now". */
        commit = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                                 opt->remote_repo_id,
                                                 version,
                                                 opt->remote_head);
        if (!commit) {
            seaf_warning ("Failed to find remote head %s:%s.\n",
                          opt->remote_repo_id, opt->remote_head);
            goto out;
        }
        modifier = g_strdup(commit->creator_name);
        mtime = (gint64)time(NULL);
        seaf_commit_unref (commit);
    }

    /* Prefer the user's display nickname when seahub access is configured. */
    nickname = modifier;
    if (seaf->seahub_pk)
        nickname = get_nickname_by_modifier (opt->email_to_nickname, modifier);

    conflict_name = gen_conflict_path (filename, nickname, mtime);

out:
    g_free (path);
    g_free (modifier);
    return conflict_name;
}
static char *
merge_conflict_dirname (const char *store_id, int version,
                        MergeOptions *opt,
                        const char *basedir,
                        const char *dirname)
{
    /* Build a conflict path for a directory, suffixed with the remote
     * head commit's author (or their nickname) and the current time.
     * Returns a newly allocated string, or NULL on failure. */
    char *conflict_name = NULL;
    char *modifier = NULL;
    const char *nickname = NULL;
    SeafCommit *commit;

    commit = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                             opt->remote_repo_id, version,
                                             opt->remote_head);
    if (!commit) {
        seaf_warning ("Failed to find remote head %s:%s.\n",
                      opt->remote_repo_id, opt->remote_head);
        return NULL;
    }
    modifier = g_strdup (commit->creator_name);
    seaf_commit_unref (commit);

    /* Prefer the user's display nickname when seahub access is configured. */
    nickname = modifier;
    if (seaf->seahub_pk)
        nickname = get_nickname_by_modifier (opt->email_to_nickname, modifier);

    conflict_name = gen_conflict_path (dirname, nickname, (gint64)time(NULL));

    g_free (modifier);
    return conflict_name;
}
int twoway_merge(const char *store_id, int version, const char *basedir,
                 SeafDirent *dents[], GList **dents_out, struct MergeOptions *opt)
{
    /* Two-way merge of one directory entry: dents[0] is head, dents[1] is
     * remote. Identical files are kept once; diverging files keep both,
     * with the remote copy renamed to a conflict name and opt->conflict
     * set. Returns 0 on success, -1 if a conflict name cannot be built. */
    SeafDirent *files[2];
    int i;
    int n = opt->n_ways;

    /* Fix: zero the whole fixed-size array instead of
     * sizeof(files[0])*n — if n_ways were ever not 2 here, the old
     * memset could leave entries uninitialized (n < 2) and the loop
     * below could write out of bounds (n > 2). Guard against the latter
     * explicitly. */
    g_return_val_if_fail (n == 2, -1);
    memset (files, 0, sizeof(files));
    for (i = 0; i < n; ++i) {
        if (dents[i] && S_ISREG(dents[i]->mode))
            files[i] = dents[i];
    }

    SeafDirent *head, *remote;
    char *conflict_name;

    head = files[0];
    remote = files[1];

    if (head && remote) {
        if (strcmp (head->id, remote->id) == 0) {
            /* Identical content: keep a single copy. */
            seaf_debug ("%s%s: files match\n", basedir, head->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));
        } else {
            /* Content conflict: keep head as-is, add remote under a
             * conflict name. */
            seaf_debug ("%s%s: files conflict\n", basedir, head->name);
            conflict_name = merge_conflict_filename(store_id, version,
                                                    opt,
                                                    basedir,
                                                    head->name);
            if (!conflict_name)
                return -1;
            /* Rename remote in place so opt->callback sees the conflict
             * name, not the original. */
            g_free (remote->name);
            remote->name = conflict_name;
            remote->name_len = strlen (remote->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote));
            opt->conflict = TRUE;
        }
    } else if (!head && remote) {
        /* file not in head, but in remote */
        seaf_debug ("%s%s: added in remote\n", basedir, remote->name);
        *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote));
    } else if (head && !remote) {
        /* file in head, but not in remote */
        seaf_debug ("%s%s: added in head\n", basedir, head->name);
        *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));
    }

    return 0;
}
static int
threeway_merge (const char *store_id, int version,
                SeafDirent *dents[],
                const char *basedir,
                GList **dents_out,
                MergeOptions *opt)
{
    /* Three-way merge of one directory entry:
     * dents[0] = base, dents[1] = head, dents[2] = remote.
     * Only regular-file entries are merged here (directories are handled
     * by merge_directories()); conflicting files are kept under a
     * conflict name and opt->conflict is set.
     * Returns 0 on success, -1 if a conflict name cannot be generated. */
    SeafDirent *files[3];
    int i;
    int n = opt->n_ways;

    /* Fixes: removed the unused local `gint64 curr_time` and zero the
     * whole fixed-size array rather than sizeof(files[0])*n. */
    memset (files, 0, sizeof(files));
    for (i = 0; i < n; ++i) {
        if (dents[i] && S_ISREG(dents[i]->mode))
            files[i] = dents[i];
    }

    SeafDirent *base, *head, *remote;
    char *conflict_name;

    base = files[0];
    head = files[1];
    remote = files[2];

    if (head && remote) {
        if (strcmp (head->id, remote->id) == 0) {
            seaf_debug ("%s%s: files match\n", basedir, head->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));
        } else if (base && strcmp (base->id, head->id) == 0) {
            seaf_debug ("%s%s: unchanged in head, changed in remote\n",
                        basedir, head->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote));
        } else if (base && strcmp (base->id, remote->id) == 0) {
            seaf_debug ("%s%s: unchanged in remote, changed in head\n",
                        basedir, head->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));
        } else {
            /* File content conflict. */
            seaf_debug ("%s%s: files conflict\n", basedir, head->name);
            conflict_name = merge_conflict_filename(store_id, version,
                                                    opt,
                                                    basedir,
                                                    head->name);
            if (!conflict_name)
                return -1;
            /* Change remote entry name in place. So opt->callback
             * will see the conflict name, not the original name.
             */
            g_free (remote->name);
            remote->name = conflict_name;
            remote->name_len = strlen (remote->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote));
            opt->conflict = TRUE;
        }
    } else if (base && !head && remote) {
        if (strcmp (base->id, remote->id) != 0) {
            if (dents[1] != NULL) {
                /* D/F conflict:
                 * Head replaces file with dir, while remote change the file.
                 */
                seaf_debug ("%s%s: DFC, file -> dir, file\n",
                            basedir, remote->name);
                conflict_name = merge_conflict_filename(store_id, version,
                                                        opt,
                                                        basedir,
                                                        remote->name);
                if (!conflict_name)
                    return -1;
                /* Change the name of remote, keep dir name in head unchanged.
                 */
                g_free (remote->name);
                remote->name = conflict_name;
                remote->name_len = strlen (remote->name);
                *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote));
                opt->conflict = TRUE;
            } else {
                /* Deleted in head and changed in remote. */
                seaf_debug ("%s%s: deleted in head and changed in remote\n",
                            basedir, remote->name);
                /* Keep version of remote. */
                *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote));
            }
        } else {
            /* If base and remote match, the file should not be added to
             * the merge result.
             */
            seaf_debug ("%s%s: file deleted in head, unchanged in remote\n",
                        basedir, remote->name);
        }
    } else if (base && head && !remote) {
        if (strcmp (base->id, head->id) != 0) {
            if (dents[2] != NULL) {
                /* D/F conflict:
                 * Remote replaces file with dir, while head change the file.
                 */
                seaf_debug ("%s%s: DFC, file -> file, dir\n",
                            basedir, head->name);
                /* We use remote head commit author name as conflict
                 * suffix of a dir.
                 */
                conflict_name = merge_conflict_dirname (store_id, version,
                                                        opt,
                                                        basedir, dents[2]->name);
                if (!conflict_name)
                    return -1;
                /* Change remote dir name to conflict name in place. */
                g_free (dents[2]->name);
                dents[2]->name = conflict_name;
                dents[2]->name_len = strlen (dents[2]->name);
                *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));
                opt->conflict = TRUE;
            } else {
                /* Deleted in remote and changed in head. */
                seaf_debug ("%s%s: deleted in remote and changed in head\n",
                            basedir, head->name);
                /* Keep version of head. */
                *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));
            }
        } else {
            /* If base and head match, the file should not be added to
             * the merge result.
             */
            seaf_debug ("%s%s: file deleted in remote, unchanged in head\n",
                        basedir, head->name);
        }
    } else if (!base && !head && remote) {
        if (!dents[1]) {
            /* Added in remote. */
            seaf_debug ("%s%s: added in remote\n", basedir, remote->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote));
        } else if (dents[0] != NULL && strcmp(dents[0]->id, dents[1]->id) == 0) {
            /* Contents in the dir is not changed.
             * The dir will be deleted in merge_directories().
             */
            seaf_debug ("%s%s: dir in head will be replaced by file in remote\n",
                        basedir, remote->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote));
        } else {
            /* D/F conflict:
             * Contents of the dir is changed in head, while
             * remote replace the dir with a file.
             *
             * Or, head adds a new dir, while remote adds a new file,
             * with the same name.
             */
            seaf_debug ("%s%s: DFC, dir -> dir, file\n", basedir, remote->name);
            conflict_name = merge_conflict_filename(store_id, version,
                                                    opt,
                                                    basedir,
                                                    remote->name);
            if (!conflict_name)
                return -1;
            g_free (remote->name);
            remote->name = conflict_name;
            remote->name_len = strlen (remote->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote));
            opt->conflict = TRUE;
        }
    } else if (!base && head && !remote) {
        if (!dents[2]) {
            /* Added in head (the old comment said "remote" — copy-paste). */
            seaf_debug ("%s%s: added in head\n", basedir, head->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));
        } else if (dents[0] != NULL && strcmp(dents[0]->id, dents[2]->id) == 0) {
            /* Contents in the dir is not changed.
             * The dir will be deleted in merge_directories().
             */
            seaf_debug ("%s%s: dir in remote will be replaced by file in head\n",
                        basedir, head->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));
        } else {
            /* D/F conflict:
             * Contents of the dir is changed in remote, while
             * head replace the dir with a file.
             *
             * Or, remote adds a new dir, while head adds a new file,
             * with the same name.
             */
            seaf_debug ("%s%s: DFC, dir -> file, dir\n", basedir, head->name);
            conflict_name = merge_conflict_dirname (store_id, version,
                                                    opt,
                                                    basedir, dents[2]->name);
            if (!conflict_name)
                return -1;
            g_free (dents[2]->name);
            dents[2]->name = conflict_name;
            dents[2]->name_len = strlen (dents[2]->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));
            opt->conflict = TRUE;
        }
    } else if (base && !head && !remote) {
        /* Don't need to add anything to dents_out. */
        seaf_debug ("%s%s: deleted in head and remote\n", basedir, base->name);
    }

    return 0;
}
/*
 * Dispatch a set of same-named entries to the appropriate merge strategy.
 *
 * @n == 2 means a 2-way merge (head vs remote only); any other value is
 * a real 3-way merge (base, head, remote) with conflict handling.
 * Returns the result of the chosen merge routine.
 */
static int
merge_entries (const char *store_id, int version,
               int n, SeafDirent *dents[],
               const char *basedir,
               GList **dents_out,
               MergeOptions *opt)
{
    int ret;

    if (n == 2) {
        /* 2-way: merge files based on head and remote. */
        ret = twoway_merge (store_id, version, basedir, dents, dents_out, opt);
    } else {
        /* 3-way: merge files and handle any conflicts. */
        ret = threeway_merge (store_id, version, dents, basedir, dents_out, opt);
    }

    return ret;
}
/*
 * Merge one name that is a directory in at least one of the trees.
 *
 * dents[] holds the same-named entry from each tree (NULL when absent).
 * For a 3-way merge: dents[0]=base, dents[1]=head, dents[2]=remote.
 * For a 2-way merge: dents[0]=head, dents[1]=remote.
 * Merged results are prepended to @dents_out.  Returns 0 on success,
 * -1 on error.
 */
static int
merge_directories (const char *store_id, int version,
                   int n, SeafDirent *dents[],
                   const char *basedir,
                   GList **dents_out,
                   MergeOptions *opt)
{
    SeafDir *dir;
    SeafDir *sub_dirs[3];
    char *dirname = NULL;
    char *new_basedir;
    int ret = 0;
    int dir_mask = 0, i;
    SeafDirent *merged_dent;

    /* Bit i of dir_mask is set iff dents[i] exists and is a directory. */
    for (i = 0; i < n; ++i) {
        if (dents[i] && S_ISDIR(dents[i]->mode))
            dir_mask |= 1 << i;
    }

    seaf_debug ("dir_mask = %d\n", dir_mask);

    if (n == 3) {
        /* Decide, per dir_mask, whether we can short-circuit or must
         * recurse into the sub directories below. */
        switch (dir_mask) {
        case 0:
            /* Caller guarantees at least one side is a directory. */
            g_return_val_if_reached (-1);
        case 1:
            /* head and remote are not dirs, nothing to merge. */
            seaf_debug ("%s%s: no dir, no need to merge\n", basedir, dents[0]->name);
            return 0;
        case 2:
            /* only head is dir, add to result directly, no need to merge. */
            seaf_debug ("%s%s: only head is dir\n", basedir, dents[1]->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[1]));
            return 0;
        case 3:
            if (strcmp (dents[0]->id, dents[1]->id) == 0) {
                /* Base and head are the same, but deleted in remote. */
                seaf_debug ("%s%s: dir deleted in remote\n", basedir, dents[0]->name);
                return 0;
            }
            /* Changed in head, deleted in remote: recurse to keep changes. */
            seaf_debug ("%s%s: dir changed in head but deleted in remote\n",
                        basedir, dents[1]->name);
            break;
        case 4:
            /* only remote is dir, add to result directly, no need to merge. */
            seaf_debug ("%s%s: only remote is dir\n", basedir, dents[2]->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[2]));
            return 0;
        case 5:
            if (strcmp (dents[0]->id, dents[2]->id) == 0) {
                /* Base and remote are the same, but deleted in head. */
                seaf_debug ("%s%s: dir deleted in head\n", basedir, dents[0]->name);
                return 0;
            }
            /* Changed in remote, deleted in head: recurse to keep changes. */
            seaf_debug ("%s%s: dir changed in remote but deleted in head\n",
                        basedir, dents[2]->name);
            break;
        case 6:
        case 7:
            if (strcmp (dents[1]->id, dents[2]->id) == 0) {
                /* Head and remote match. */
                seaf_debug ("%s%s: dir is the same in head and remote\n",
                            basedir, dents[1]->name);
                *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[1]));
                return 0;
            } else if (dents[0] && strcmp(dents[0]->id, dents[1]->id) == 0) {
                /* Only remote changed: take remote's version. */
                seaf_debug ("%s%s: dir changed in remote but unchanged in head\n",
                            basedir, dents[1]->name);
                *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[2]));
                return 0;
            } else if (dents[0] && strcmp(dents[0]->id, dents[2]->id) == 0) {
                /* Only head changed: take head's version. */
                seaf_debug ("%s%s: dir changed in head but unchanged in remote\n",
                            basedir, dents[1]->name);
                *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[1]));
                return 0;
            }
            /* Both sides changed: must merge recursively. */
            seaf_debug ("%s%s: dir is changed in both head and remote, "
                        "merge recursively\n", basedir, dents[1]->name);
            break;
        default:
            g_return_val_if_reached (-1);
        }
    } else if (n == 2) {
        switch (dir_mask) {
        case 0:
            g_return_val_if_reached (-1);
        case 1:
            /* head is dir, remote is not dir */
            seaf_debug ("%s%s: only head is dir\n", basedir, dents[0]->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[0]));
            return 0;
        case 2:
            /* head is not dir, remote is dir */
            seaf_debug ("%s%s: only remote is dir\n", basedir, dents[1]->name);
            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[1]));
            return 0;
        case 3:
            if (strcmp (dents[0]->id, dents[1]->id) == 0) {
                seaf_debug ("%s%s: dir is the same in head and remote\n",
                            basedir, dents[0]->name);
                *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[1]));
                return 0;
            }
            seaf_debug ("%s%s: dir is changed in head and remote, merge recursively\n",
                        basedir, dents[0]->name);
            break;
        default:
            g_return_val_if_reached (-1);
        }
    }

    /* Load every side that is a directory, then recurse into them. */
    memset (sub_dirs, 0, sizeof(sub_dirs[0])*n);
    for (i = 0; i < n; ++i) {
        if (dents[i] != NULL && S_ISDIR(dents[i]->mode)) {
            dir = seaf_fs_manager_get_seafdir (seaf->fs_mgr,
                                               store_id, version,
                                               dents[i]->id);
            if (!dir) {
                seaf_warning ("Failed to find dir %s:%s.\n", store_id, dents[i]->id);
                ret = -1;
                goto free_sub_dirs;
            }
            opt->visit_dirs++;
            sub_dirs[i] = dir;
            /* Any side's name works; they are all equal here. */
            dirname = dents[i]->name;
        }
    }

    new_basedir = g_strconcat (basedir, dirname, "/", NULL);
    ret = merge_trees_recursive (store_id, version, n, sub_dirs, new_basedir, opt);
    g_free (new_basedir);

    /* Emit a dirent pointing at the merged sub-tree root.  Which side's
     * metadata is carried depends on which sides had the directory. */
    if (n == 3) {
        if (dir_mask == 3 || dir_mask == 6 || dir_mask == 7) {
            merged_dent = seaf_dirent_dup (dents[1]);
            memcpy (merged_dent->id, opt->merged_tree_root, 40);
            *dents_out = g_list_prepend (*dents_out, merged_dent);
        } else if (dir_mask == 5) {
            merged_dent = seaf_dirent_dup (dents[2]);
            memcpy (merged_dent->id, opt->merged_tree_root, 40);
            *dents_out = g_list_prepend (*dents_out, merged_dent);
        }
    } else if (n == 2) {
        if (dir_mask == 3) {
            merged_dent = seaf_dirent_dup (dents[1]);
            memcpy (merged_dent->id, opt->merged_tree_root, 40);
            *dents_out = g_list_prepend (*dents_out, merged_dent);
        }
    }

free_sub_dirs:
    /* seaf_dir_free tolerates NULL entries (sub_dirs was zeroed). */
    for (i = 0; i < n; ++i)
        seaf_dir_free (sub_dirs[i]);
    return ret;
}
/*
 * GCompareFunc for SeafDirent lists: orders entries by name in
 * descending (reverse lexicographic) order, as the operands to strcmp
 * are deliberately swapped.
 */
static gint
compare_dirents (gconstpointer a, gconstpointer b)
{
    const SeafDirent *left = a;
    const SeafDirent *right = b;

    return strcmp (right->name, left->name);
}
/*
 * Merge @n trees level by level.
 *
 * Walks all trees' (sorted) entry lists in lock step, groups entries
 * with equal names, merges files via merge_entries() and recurses into
 * directories via merge_directories().  The merged directory is saved
 * (unless identical to one of the inputs) and its id is stored in
 * opt->merged_tree_root.  Returns 0 on success, negative on error.
 *
 * NOTE(review): on the early-return error paths below, the dirents
 * accumulated in merged_dents so far are not freed — looks like a leak;
 * confirm ownership conventions before changing.
 */
static int
merge_trees_recursive (const char *store_id, int version,
                       int n, SeafDir *trees[],
                       const char *basedir,
                       MergeOptions *opt)
{
    GList *ptrs[3];
    SeafDirent *dents[3];
    int i;
    SeafDirent *dent;
    char *first_name;
    gboolean done;
    int ret = 0;
    SeafDir *merged_tree;
    GList *merged_dents = NULL;

    /* ptrs[i] is the cursor into tree i's entry list (NULL when the tree
     * is absent or exhausted). */
    for (i = 0; i < n; ++i) {
        if (trees[i])
            ptrs[i] = trees[i]->entries;
        else
            ptrs[i] = NULL;
    }

    while (1) {
        first_name = NULL;
        memset (dents, 0, sizeof(dents[0])*n);
        done = TRUE;

        /* Find the "largest" name, assuming dirents are sorted. */
        for (i = 0; i < n; ++i) {
            if (ptrs[i] != NULL) {
                done = FALSE;
                dent = ptrs[i]->data;
                if (!first_name)
                    first_name = dent->name;
                else if (strcmp(dent->name, first_name) > 0)
                    first_name = dent->name;
            }
        }
        /* All cursors exhausted: this level is fully merged. */
        if (done)
            break;

        /*
         * Setup dir entries for all names that equal to first_name
         */
        int n_files = 0, n_dirs = 0;
        for (i = 0; i < n; ++i) {
            if (ptrs[i] != NULL) {
                dent = ptrs[i]->data;
                if (strcmp(first_name, dent->name) == 0) {
                    if (S_ISREG(dent->mode))
                        ++n_files;
                    else if (S_ISDIR(dent->mode))
                        ++n_dirs;
                    /* Consume this entry and advance the cursor. */
                    dents[i] = dent;
                    ptrs[i] = ptrs[i]->next;
                }
            }
        }

        /* Merge entries of this level. */
        if (n_files > 0) {
            ret = merge_entries (store_id, version,
                                 n, dents, basedir, &merged_dents, opt);
            if (ret < 0)
                return ret;
        }

        /* Recurse into sub level. */
        if (n_dirs > 0) {
            ret = merge_directories (store_id, version,
                                     n, dents, basedir, &merged_dents, opt);
            if (ret < 0)
                return ret;
        }
    }

    if (n == 3) {
        /* Build the merged directory object; seaf_dir_new takes ownership
         * of merged_dents.  compare_dirents sorts names descending. */
        merged_dents = g_list_sort (merged_dents, compare_dirents);
        merged_tree = seaf_dir_new (NULL, merged_dents,
                                    dir_version_from_repo_version(version));
        memcpy (opt->merged_tree_root, merged_tree->dir_id, 40);
        /* Skip saving when the result is identical to head or remote. */
        if ((trees[1] && strcmp (trees[1]->dir_id, merged_tree->dir_id) == 0) ||
            (trees[2] && strcmp (trees[2]->dir_id, merged_tree->dir_id) == 0)) {
            seaf_dir_free (merged_tree);
        } else {
            ret = seaf_dir_save (seaf->fs_mgr, store_id, version, merged_tree);
            seaf_dir_free (merged_tree);
            if (ret < 0) {
                seaf_warning ("Failed to save merged tree %s:%s.\n", store_id, basedir);
            }
        }
    } else if (n == 2) {
        merged_dents = g_list_sort (merged_dents, compare_dirents);
        merged_tree = seaf_dir_new (NULL, merged_dents,
                                    dir_version_from_repo_version(version));
        memcpy (opt->merged_tree_root, merged_tree->dir_id, 40);
        /* Skip saving when the result is identical to either input. */
        if ((trees[0] && strcmp (trees[0]->dir_id, merged_tree->dir_id) == 0) ||
            (trees[1] && strcmp (trees[1]->dir_id, merged_tree->dir_id) == 0)) {
            seaf_dir_free (merged_tree);
        } else {
            ret = seaf_dir_save (seaf->fs_mgr, store_id, version, merged_tree);
            seaf_dir_free (merged_tree);
            if (ret < 0) {
                seaf_warning ("Failed to save merged tree %s:%s.\n", store_id, basedir);
            }
        }
    }

    return ret;
}
/*
 * Merge @n root trees (2-way or 3-way).
 *
 * Loads each root directory, runs the recursive merge from the top, and
 * cleans up.  Returns 0 on success, -1 on error.
 *
 * Fix: on a mid-loop lookup failure the previously fetched trees and
 * opt->email_to_nickname were leaked; all paths now go through common
 * cleanup (seaf_dir_free tolerates NULL entries, trees is zeroed).
 */
int
seaf_merge_trees (const char *store_id, int version,
                  int n, const char *roots[], MergeOptions *opt)
{
    SeafDir **trees, *root;
    int i, ret = 0;

    g_return_val_if_fail (n == 2 || n == 3, -1);

    opt->email_to_nickname = g_hash_table_new_full(g_str_hash,
                                                   g_str_equal,
                                                   g_free,
                                                   g_free);

    trees = g_new0 (SeafDir *, n);

    for (i = 0; i < n; ++i) {
        root = seaf_fs_manager_get_seafdir (seaf->fs_mgr, store_id, version, roots[i]);
        if (!root) {
            seaf_warning ("Failed to find dir %s:%s.\n", store_id, roots[i]);
            ret = -1;
            goto out;
        }
        trees[i] = root;
    }

    ret = merge_trees_recursive (store_id, version, n, trees, "", opt);

out:
    for (i = 0; i < n; ++i)
        seaf_dir_free (trees[i]);
    g_free (trees);

    g_hash_table_destroy (opt->email_to_nickname);
    /* Don't leave a dangling pointer behind for callers. */
    opt->email_to_nickname = NULL;

    return ret;
}
================================================
FILE: common/merge-new.h
================================================
#ifndef MERGE_NEW_H
#define MERGE_NEW_H

#include "common.h"
#include "fs-mgr.h"

struct MergeOptions;

/* Callback invoked during the merge for each group of same-named
 * entries; dirents[] holds one entry per tree (NULL when absent). */
typedef int (*MergeCallback) (const char *basedir,
                              SeafDirent *dirents[],
                              struct MergeOptions *opt);

/* Options and outputs of a merge operation. */
typedef struct MergeOptions {
    int n_ways;                 /* only 2 and 3 way merges are supported. */

    MergeCallback callback;
    void * data;

    /* options only used in 3-way merge. */
    char remote_repo_id[37];
    char remote_head[41];
    gboolean do_merge;          /* really merge the contents
                                 * and handle conflicts */
    char merged_tree_root[41];  /* merge result */
    int visit_dirs;             /* number of directories visited (stats) */
    gboolean conflict;          /* set when a conflict was detected */
    GHashTable *email_to_nickname; /* owned by seaf_merge_trees() */
} MergeOptions;

/* Merge @n root trees; returns 0 on success, -1 on error. */
int
seaf_merge_trees (const char *store_id, int version,
                  int n, const char *roots[], MergeOptions *opt);

#endif
================================================
FILE: common/mq-mgr.c
================================================
#include "common.h"
#include "log.h"
#include "utils.h"
#include "mq-mgr.h"
/* Private state of the message-queue manager. */
typedef struct SeafMqManagerPriv {
    // chan <-> async_queue: maps an owned channel-name string to a
    // GAsyncQueue of pending json messages (queue ref owned by table).
    GHashTable *chans;
} SeafMqManagerPriv;
/*
 * Create an empty message-queue manager.
 * The channel table owns both the name strings and a reference on each
 * queue, released automatically when entries are removed.
 */
SeafMqManager *
seaf_mq_manager_new ()
{
    SeafMqManager *manager;

    manager = g_new0 (SeafMqManager, 1);
    manager->priv = g_new0 (SeafMqManagerPriv, 1);
    manager->priv->chans =
        g_hash_table_new_full (g_str_hash, g_str_equal,
                               (GDestroyNotify)g_free,
                               (GDestroyNotify)g_async_queue_unref);

    return manager;
}
/*
 * Create and register the queue backing @channel.
 * Dropped messages release their json reference; any previously
 * registered queue for the same name is replaced.
 */
static GAsyncQueue *
seaf_mq_manager_channel_new (SeafMqManager *mgr, const char *channel)
{
    GAsyncQueue *queue = g_async_queue_new_full ((GDestroyNotify)json_decref);

    g_hash_table_replace (mgr->priv->chans, g_strdup (channel), queue);

    return queue;
}
/*
 * Push a message onto @channel, creating the channel on first use.
 *
 * The message is a json object {"content": @content, "ctime": now}.
 * Returns 0 on success, -1 on invalid arguments or channel creation
 * failure.
 *
 * Fix: the argument-validation warning referred to "type", a parameter
 * this function does not have; it now names the actual parameters.
 */
int
seaf_mq_manager_publish_event (SeafMqManager *mgr, const char *channel, const char *content)
{
    if (!channel || !content) {
        seaf_warning ("channel and content should not be NULL.\n");
        return -1;
    }

    GAsyncQueue *async_queue = g_hash_table_lookup (mgr->priv->chans, channel);
    if (!async_queue) {
        async_queue = seaf_mq_manager_channel_new (mgr, channel);
    }
    if (!async_queue) {
        seaf_warning("%s channel creation failed.\n", channel);
        return -1;
    }

    json_t *msg = json_object();
    json_object_set_new (msg, "content", json_string(content));
    json_object_set_new (msg, "ctime", json_integer(time(NULL)));

    g_async_queue_push (async_queue, msg);
    return 0;
}
/*
 * Pop the oldest pending message from @channel without blocking.
 * Returns NULL when the channel does not exist or is empty; otherwise
 * the caller takes ownership of the returned json reference.
 */
json_t *
seaf_mq_manager_pop_event (SeafMqManager *mgr, const char *channel)
{
    GAsyncQueue *queue;

    queue = g_hash_table_lookup (mgr->priv->chans, channel);
    if (queue == NULL)
        return NULL;

    return g_async_queue_try_pop (queue);
}
================================================
FILE: common/mq-mgr.h
================================================
#ifndef SEAF_MQ_MANAGER_H
#define SEAF_MQ_MANAGER_H
#include
#define SEAFILE_SERVER_CHANNEL_EVENT "seaf_server.event"
#define SEAFILE_SERVER_CHANNEL_STATS "seaf_server.stats"
struct SeafMqManagerPriv;
typedef struct SeafMqManager {
struct SeafMqManagerPriv *priv;
} SeafMqManager;
SeafMqManager *
seaf_mq_manager_new ();
int
seaf_mq_manager_publish_event (SeafMqManager *mgr, const char *channel, const char *content);
json_t *
seaf_mq_manager_pop_event (SeafMqManager *mgr, const char *channel);
#endif
================================================
FILE: common/obj-backend-fs.c
================================================
#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x500
#endif
#include "common.h"
#include "utils.h"
#include "obj-backend.h"
#ifndef WIN32
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#endif

#ifdef WIN32
#include <windows.h>
#include <io.h>
#endif
#define DEBUG_FLAG SEAFILE_DEBUG_OTHER
#include "log.h"
/* Private state for the filesystem object backend. */
typedef struct FsPriv {
    char *obj_dir;  /* object root: <seaf_dir>/storage/<obj_type> */
    int   dir_len;  /* cached strlen(obj_dir) */
} FsPriv;
/*
 * Compute the on-disk path of @obj_id into @path.
 *
 * Layout: <obj_dir>/<repo_id>/<id[0..1]>/<id[2..39]>.
 * @path must be at least SEAF_PATH_MAX bytes.  @obj_id is assumed to be
 * a 40-char NUL-terminated hex id: the final memcpy copies 39 bytes,
 * which includes obj_id's NUL terminator.
 */
static void
id_to_path (FsPriv *priv, const char *obj_id, char path[],
            const char *repo_id, int version)
{
    char *pos = path;
    int n;

#if defined MIGRATION || defined SEAFILE_CLIENT
    /* NOTE(review): when version == 0 in these builds, nothing is written
     * before the object id, so @path ends up as a relative "ab/cdef..."
     * with no directory prefix — presumably callers never reach this with
     * version 0, or handle it specially; confirm. */
    if (version > 0) {
        n = snprintf (path, SEAF_PATH_MAX, "%s/%s/", priv->obj_dir, repo_id);
        pos += n;
    }
#else
    n = snprintf (path, SEAF_PATH_MAX, "%s/%s/", priv->obj_dir, repo_id);
    pos += n;
#endif

    /* First two hex chars form the fan-out directory. */
    memcpy (pos, obj_id, 2);
    pos[2] = '/';
    pos += 3;

    /* Remaining 38 chars plus the NUL terminator. */
    memcpy (pos, obj_id + 2, 41 - 2);
}
/*
 * Read the whole object @obj_id into a newly allocated buffer.
 *
 * On success returns 0; *data points to an allocated buffer (caller
 * frees with g_free) and *len holds its size.  On failure returns -1
 * and *len is not set.  In MIGRATION builds, a failed read is retried
 * against the version-1 on-disk layout before giving up.
 *
 * NOTE(review): *len is an int while the file size is a gsize — objects
 * larger than INT_MAX would be truncated; presumably objects are always
 * small, confirm.
 */
static int
obj_backend_fs_read (ObjBackend *bend,
                     const char *repo_id,
                     int version,
                     const char *obj_id,
                     void **data,
                     int *len)
{
    char path[SEAF_PATH_MAX];
    gsize tmp_len;
    GError *error = NULL;

    id_to_path (bend->priv, obj_id, path, repo_id, version);

    /* seaf_debug ("object path: %s\n", path); */

    g_file_get_contents (path, (gchar**)data, &tmp_len, &error);
    if (error) {
#ifdef MIGRATION
        /* Fall back to the version-1 layout for repos being migrated. */
        g_clear_error (&error);
        id_to_path (bend->priv, obj_id, path, repo_id, 1);
        g_file_get_contents (path, (gchar**)data, &tmp_len, &error);
        if (error) {
            seaf_debug ("[obj backend] Failed to read object %s: %s.\n",
                        obj_id, error->message);
            g_clear_error (&error);
            return -1;
        }
#else
        seaf_debug ("[obj backend] Failed to read object %s: %s.\n",
                    obj_id, error->message);
        g_clear_error (&error);
        return -1;
#endif
    }

    *len = (int)tmp_len;
    return 0;
}
/*
* Flush operating system and disk caches for @fd.
*/
/*
 * Flush operating system and disk caches for @fd.
 *
 * Returns 0 on success (or when syncing is unsupported), -1 on error.
 *
 * Fix: the original used three independent #ifdef blocks with no
 * fallback, so on a platform matching none of them the function had no
 * return statement at all (undefined behavior).  Restructured as a
 * single #if/#elif chain with an explicit fallback.
 */
static int
fsync_obj_contents (int fd)
{
#ifdef __linux__
    /* Some file systems may not support fsync().
     * In this case, just skip the error.
     */
    if (fsync (fd) < 0) {
        if (errno == EINVAL)
            return 0;
        else {
            seaf_warning ("Failed to fsync: %s.\n", strerror(errno));
            return -1;
        }
    }
    return 0;
#elif defined(__APPLE__)
    /* OS X: fcntl() is required to flush disk cache, fsync() only
     * flushes operating system cache.
     */
    if (fcntl (fd, F_FULLFSYNC, NULL) < 0) {
        seaf_warning ("Failed to fsync: %s.\n", strerror(errno));
        return -1;
    }
    return 0;
#elif defined(WIN32)
    HANDLE handle;

    handle = (HANDLE)_get_osfhandle (fd);
    if (handle == INVALID_HANDLE_VALUE) {
        seaf_warning ("Failed to get handle from fd.\n");
        return -1;
    }

    if (!FlushFileBuffers (handle)) {
        seaf_warning ("FlushFileBuffer() failed: %lu.\n", GetLastError());
        return -1;
    }
    return 0;
#else
    /* Unsupported platform: nothing we can do; report success. */
    (void)fd;
    return 0;
#endif
}
/*
* Rename file from @tmp_path to @obj_path.
* This also makes sure the changes to @obj_path's parent folder
* is flushed to disk.
*/
/*
 * Rename file from @tmp_path to @obj_path.
 * This also makes sure the changes to @obj_path's parent folder
 * are flushed to disk (where the platform supports it).
 *
 * Returns 0 on success, -1 on error.
 *
 * Fix: the original used three independent #ifdef blocks with no
 * fallback, so on a platform matching none of them the function had no
 * return statement (undefined behavior).  Restructured as a single
 * #if/#elif chain with a plain-rename fallback.
 */
static int
rename_and_sync (const char *tmp_path, const char *obj_path)
{
#ifdef __linux__
    char *parent_dir;
    int ret = 0;

    if (rename (tmp_path, obj_path) < 0) {
        seaf_warning ("Failed to rename from %s to %s: %s.\n",
                      tmp_path, obj_path, strerror(errno));
        return -1;
    }

    /* fsync the parent directory so the new directory entry survives a
     * crash. */
    parent_dir = g_path_get_dirname (obj_path);
    int dir_fd = open (parent_dir, O_RDONLY);
    if (dir_fd < 0) {
        seaf_warning ("Failed to open dir %s: %s.\n", parent_dir, strerror(errno));
        goto out;
    }

    /* Some file systems don't support fsyncing a directory. Just ignore the error.
     */
    if (fsync (dir_fd) < 0) {
        if (errno != EINVAL) {
            seaf_warning ("Failed to fsync dir %s: %s.\n",
                          parent_dir, strerror(errno));
            ret = -1;
        }
        goto out;
    }

out:
    g_free (parent_dir);
    if (dir_fd >= 0)
        close (dir_fd);
    return ret;
#elif defined(__APPLE__)
    /*
     * OS X guarantees obj_path exists after the rename,
     * even when the system crashes.
     */
    if (rename (tmp_path, obj_path) < 0) {
        seaf_warning ("Failed to rename from %s to %s: %s.\n",
                      tmp_path, obj_path, strerror(errno));
        return -1;
    }
    return 0;
#elif defined(WIN32)
    wchar_t *w_tmp_path = g_utf8_to_utf16 (tmp_path, -1, NULL, NULL, NULL);
    wchar_t *w_obj_path = g_utf8_to_utf16 (obj_path, -1, NULL, NULL, NULL);
    int ret = 0;

    if (!MoveFileExW (w_tmp_path, w_obj_path,
                      MOVEFILE_REPLACE_EXISTING | MOVEFILE_WRITE_THROUGH)) {
        seaf_warning ("MoveFilExW failed: %lu.\n", GetLastError());
        ret = -1;
        goto out;
    }

out:
    g_free (w_tmp_path);
    g_free (w_obj_path);
    return ret;
#else
    /* Fallback for other platforms: plain rename without directory sync. */
    if (rename (tmp_path, obj_path) < 0) {
        seaf_warning ("Failed to rename from %s to %s: %s.\n",
                      tmp_path, obj_path, strerror(errno));
        return -1;
    }
    return 0;
#endif
}
/*
 * Write @len bytes of @data to @path atomically via a temp file.
 *
 * The contents are written to "<path>.XXXXXX" first and renamed into
 * place; when @need_sync is TRUE both the file contents and (on Linux)
 * the parent directory are fsynced.  Returns 0 on success, -1 on error.
 *
 * Fix: the write/fsync/close failure paths leaked the open file
 * descriptor and left the temp file behind; they now close the fd and
 * unlink the temp file.  Also repaired the malformed close() log message.
 */
static int
save_obj_contents (const char *path, const void *data, int len, gboolean need_sync)
{
    char tmp_path[SEAF_PATH_MAX];
    int fd;

    snprintf (tmp_path, SEAF_PATH_MAX, "%s.XXXXXX", path);
    fd = g_mkstemp (tmp_path);
    if (fd < 0) {
        seaf_warning ("[obj backend] Failed to open tmp file %s: %s.\n",
                      tmp_path, strerror(errno));
        return -1;
    }

    if (writen (fd, data, len) < 0) {
        seaf_warning ("[obj backend] Failed to write obj %s: %s.\n",
                      tmp_path, strerror(errno));
        close (fd);
        g_unlink (tmp_path);
        return -1;
    }

    if (need_sync && fsync_obj_contents (fd) < 0) {
        close (fd);
        g_unlink (tmp_path);
        return -1;
    }

    /* Close may return error, especially in NFS. */
    if (close (fd) < 0) {
        seaf_warning ("[obj backend] Failed to close obj %s: %s.\n",
                      tmp_path, strerror(errno));
        g_unlink (tmp_path);
        return -1;
    }

    if (need_sync) {
        if (rename_and_sync (tmp_path, path) < 0) {
            g_unlink (tmp_path);
            return -1;
        }
    } else {
        if (g_rename (tmp_path, path) < 0) {
            seaf_warning ("[obj backend] Failed to rename %s: %s.\n",
                          path, strerror(errno));
            g_unlink (tmp_path);
            return -1;
        }
    }

    return 0;
}
/*
 * Ensure the directory component of @path exists, creating it (and any
 * missing ancestors) when needed.  Returns 0 on success, -1 on error.
 */
static int
create_parent_path (const char *path)
{
    char *parent = g_path_get_dirname (path);
    int ret = 0;

    if (!parent)
        return -1;

    if (!g_file_test (parent, G_FILE_TEST_EXISTS) &&
        g_mkdir_with_parents (parent, 0777) < 0) {
        seaf_warning ("Failed to create object parent path %s: %s.\n",
                      parent, strerror(errno));
        ret = -1;
    }

    g_free (parent);
    return ret;
}
/*
 * Write object @obj_id for @repo_id to disk.
 *
 * Resolves the on-disk location, makes sure its parent directory
 * exists, then stores the contents (synced to disk when @need_sync is
 * TRUE).  Returns 0 on success, -1 on error.
 */
static int
obj_backend_fs_write (ObjBackend *bend,
                      const char *repo_id,
                      int version,
                      const char *obj_id,
                      void *data,
                      int len,
                      gboolean need_sync)
{
    char obj_path[SEAF_PATH_MAX];

    id_to_path (bend->priv, obj_id, obj_path, repo_id, version);

    if (create_parent_path (obj_path) < 0) {
        seaf_warning ("[obj backend] Failed to create path for obj %s:%s.\n",
                      repo_id, obj_id);
        return -1;
    }

    if (save_obj_contents (obj_path, data, len, need_sync) < 0) {
        seaf_warning ("[obj backend] Failed to write obj %s:%s.\n",
                      repo_id, obj_id);
        return -1;
    }

    return 0;
}
/*
 * Check whether object @obj_id exists on disk for @repo_id.
 */
static gboolean
obj_backend_fs_exists (ObjBackend *bend,
                       const char *repo_id,
                       int version,
                       const char *obj_id)
{
    char obj_path[SEAF_PATH_MAX];
    SeafStat st;

    id_to_path (bend->priv, obj_id, obj_path, repo_id, version);

    return (seaf_stat (obj_path, &st) == 0);
}
/*
 * Remove object @obj_id from disk.  Errors from unlink are ignored.
 */
static void
obj_backend_fs_delete (ObjBackend *bend,
                       const char *repo_id,
                       int version,
                       const char *obj_id)
{
    char obj_path[SEAF_PATH_MAX];

    id_to_path (bend->priv, obj_id, obj_path, repo_id, version);

    g_unlink (obj_path);
}
/*
 * Invoke @process on every object id stored for @repo_id.
 *
 * Object ids are reconstructed by concatenating the 2-char fan-out
 * directory name with the file name inside it.  Iteration stops early
 * when @process returns FALSE.  Always returns 0 (ret is never set to
 * an error), even when the top-level dir cannot be opened.
 *
 * NOTE(review): in MIGRATION/SEAFILE_CLIENT builds with version == 0,
 * obj_dir remains NULL and the strlen()/g_dir_open() calls below would
 * crash — presumably this function is never called with version 0 in
 * those builds; confirm.
 */
static int
obj_backend_fs_foreach_obj (ObjBackend *bend,
                            const char *repo_id,
                            int version,
                            SeafObjFunc process,
                            void *user_data)
{
    FsPriv *priv = bend->priv;
    char *obj_dir = NULL;
    int dir_len;
    GDir *dir1 = NULL, *dir2;
    const char *dname1, *dname2;
    char obj_id[128];
    char path[SEAF_PATH_MAX], *pos;
    int ret = 0;

#if defined MIGRATION || defined SEAFILE_CLIENT
    if (version > 0)
        obj_dir = g_build_filename (priv->obj_dir, repo_id, NULL);
#else
    obj_dir = g_build_filename (priv->obj_dir, repo_id, NULL);
#endif
    dir_len = strlen (obj_dir);

    dir1 = g_dir_open (obj_dir, 0, NULL);
    if (!dir1) {
        goto out;
    }

    /* Reuse one path buffer: the fixed prefix is the repo's object dir. */
    memcpy (path, obj_dir, dir_len);
    pos = path + dir_len;

    while ((dname1 = g_dir_read_name(dir1)) != NULL) {
        snprintf (pos, sizeof(path) - dir_len, "/%s", dname1);

        dir2 = g_dir_open (path, 0, NULL);
        if (!dir2) {
            seaf_warning ("Failed to open object dir %s.\n", path);
            continue;
        }

        while ((dname2 = g_dir_read_name(dir2)) != NULL) {
            /* Object id = fan-out dir name + file name. */
            snprintf (obj_id, sizeof(obj_id), "%s%s", dname1, dname2);
            if (!process (repo_id, version, obj_id, user_data)) {
                g_dir_close (dir2);
                goto out;
            }
        }
        g_dir_close (dir2);
    }

out:
    if (dir1)
        g_dir_close (dir1);
    g_free (obj_dir);
    return ret;
}
/*
 * Copy object @obj_id from one repo to another by hard-linking.
 *
 * A no-op when the destination already exists.  Returns 0 on success,
 * -1 on error.
 *
 * Fix: on POSIX, link() failing with EEXIST was deliberately excluded
 * from the warning but the function still returned -1 (the raw link()
 * return value).  An already-existing destination now counts as
 * success, consistent with the g_file_test() shortcut above.
 */
static int
obj_backend_fs_copy (ObjBackend *bend,
                     const char *src_repo_id,
                     int src_version,
                     const char *dst_repo_id,
                     int dst_version,
                     const char *obj_id)
{
    char src_path[SEAF_PATH_MAX];
    char dst_path[SEAF_PATH_MAX];

    id_to_path (bend->priv, obj_id, src_path, src_repo_id, src_version);
    id_to_path (bend->priv, obj_id, dst_path, dst_repo_id, dst_version);

    if (g_file_test (dst_path, G_FILE_TEST_EXISTS))
        return 0;

    if (create_parent_path (dst_path) < 0) {
        seaf_warning ("Failed to create dst path %s for obj %s.\n",
                      dst_path, obj_id);
        return -1;
    }

#ifdef WIN32
    if (!CreateHardLink (dst_path, src_path, NULL)) {
        seaf_warning ("Failed to link %s to %s: %lu.\n",
                      src_path, dst_path, GetLastError());
        return -1;
    }
    return 0;
#else
    if (link (src_path, dst_path) < 0 && errno != EEXIST) {
        seaf_warning ("Failed to link %s to %s: %s.\n",
                      src_path, dst_path, strerror(errno));
        return -1;
    }
    return 0;
#endif
}
/*
 * Delete every object belonging to @store_id, then remove the (two
 * level) directory tree itself.  Returns 0 on success (including when
 * the store directory does not exist), -1 when a fan-out directory
 * cannot be opened.
 */
static int
obj_backend_fs_remove_store (ObjBackend *bend, const char *store_id)
{
    FsPriv *priv = bend->priv;
    char *store_dir;
    GDir *top_dir, *fan_dir;
    const char *fan_name, *obj_name;
    char *fan_path, *obj_path;

    store_dir = g_build_filename (priv->obj_dir, store_id, NULL);
    top_dir = g_dir_open (store_dir, 0, NULL);
    if (top_dir == NULL) {
        /* Nothing stored for this id. */
        g_free (store_dir);
        return 0;
    }

    while ((fan_name = g_dir_read_name (top_dir)) != NULL) {
        fan_path = g_build_filename (store_dir, fan_name, NULL);
        fan_dir = g_dir_open (fan_path, 0, NULL);
        if (fan_dir == NULL) {
            seaf_warning ("Failed to open obj dir %s.\n", fan_path);
            g_dir_close (top_dir);
            g_free (fan_path);
            g_free (store_dir);
            return -1;
        }

        /* Unlink each object file, then drop the fan-out directory. */
        while ((obj_name = g_dir_read_name (fan_dir)) != NULL) {
            obj_path = g_build_filename (fan_path, obj_name, NULL);
            g_unlink (obj_path);
            g_free (obj_path);
        }
        g_dir_close (fan_dir);

        g_rmdir (fan_path);
        g_free (fan_path);
    }

    g_dir_close (top_dir);
    g_rmdir (store_dir);
    g_free (store_dir);

    return 0;
}
ObjBackend *
obj_backend_fs_new (const char *seaf_dir, const char *obj_type)
{
ObjBackend *bend;
FsPriv *priv;
bend = g_new0(ObjBackend, 1);
priv = g_new0(FsPriv, 1);
bend->priv = priv;
priv->obj_dir = g_build_filename (seaf_dir, "storage", obj_type, NULL);
priv->dir_len = strlen (priv->obj_dir);
if (g_mkdir_with_parents (priv->obj_dir, 0777) < 0) {
seaf_warning ("[Obj Backend] Objects dir %s does not exist and"
" is unable to create\n", priv->obj_dir);
goto onerror;
}
bend->read = obj_backend_fs_read;
bend->write = obj_backend_fs_write;
bend->exists = obj_backend_fs_exists;
bend->delete = obj_backend_fs_delete;
bend->foreach_obj = obj_backend_fs_foreach_obj;
bend->copy = obj_backend_fs_copy;
bend->remove_store = obj_backend_fs_remove_store;
return bend;
onerror:
g_free (priv->obj_dir);
g_free (priv);
g_free (bend);
return NULL;
}
================================================
FILE: common/obj-backend-riak.c
================================================
#include "common.h"
#include "log.h"
#include "obj-backend.h"
#ifdef RIAK_BACKEND
#include "riak-client.h"
#include <pthread.h>
/* Private state for the Riak object backend. */
typedef struct RiakPriv {
    const char *host;       /* NOTE(review): holds g_strdup'ed memory
                             * despite the const qualifier; freeing would
                             * need a cast — confirm intent. */
    const char *port;
    const char *bucket;     /* Riak bucket holding the objects */
    int n_write;            /* write quorum (RIAK_QUORUM or RIAK_ALL) */

    GQueue *conn_pool;      /* pool of idle SeafRiakClient connections */
    pthread_mutex_t lock;   /* guards conn_pool */
} RiakPriv;
/*
 * Take a connection from the shared pool, opening a fresh one when the
 * pool is empty.  The pool is shared between threads, hence the lock.
 */
static SeafRiakClient *
get_connection (RiakPriv *priv)
{
    SeafRiakClient *client;

    pthread_mutex_lock (&priv->lock);
    client = g_queue_pop_head (priv->conn_pool);
    if (client == NULL)
        client = seaf_riak_client_new (priv->host, priv->port);
    pthread_mutex_unlock (&priv->lock);

    return client;
}
/*
 * Put a connection back into the shared pool for later reuse.
 */
static void
return_connection (RiakPriv *priv, SeafRiakClient *connection)
{
    pthread_mutex_lock (&priv->lock);
    g_queue_push_tail (priv->conn_pool, connection);
    pthread_mutex_unlock (&priv->lock);
}
/*
 * Fetch object @obj_id from the configured bucket using a pooled
 * connection.  Returns the client library's status code.
 */
static int
obj_backend_riak_read (ObjBackend *bend,
                       const char *obj_id,
                       void **data,
                       int *len)
{
    RiakPriv *priv = bend->priv;
    SeafRiakClient *client = get_connection (priv);
    int status;

    status = seaf_riak_client_get (client, priv->bucket, obj_id, data, len);
    return_connection (priv, client);

    return status;
}
/*
 * Store object @obj_id into the configured bucket with the backend's
 * write quorum, using a pooled connection.
 */
static int
obj_backend_riak_write (ObjBackend *bend,
                        const char *obj_id,
                        void *data,
                        int len)
{
    RiakPriv *priv = bend->priv;
    SeafRiakClient *client = get_connection (priv);
    int status;

    status = seaf_riak_client_put (client, priv->bucket, obj_id, data, len,
                                   priv->n_write);
    return_connection (priv, client);

    return status;
}
/*
 * Query whether object @obj_id exists in the configured bucket.
 */
static gboolean
obj_backend_riak_exists (ObjBackend *bend,
                         const char *obj_id)
{
    RiakPriv *priv = bend->priv;
    SeafRiakClient *client = get_connection (priv);
    gboolean found;

    found = seaf_riak_client_query (client, priv->bucket, obj_id);
    return_connection (priv, client);

    return found;
}
/*
 * Delete object @obj_id from the configured bucket; any error from the
 * client library is ignored.
 */
static void
obj_backend_riak_delete (ObjBackend *bend,
                         const char *obj_id)
{
    RiakPriv *priv = bend->priv;
    SeafRiakClient *client = get_connection (priv);

    seaf_riak_client_delete (client, priv->bucket, obj_id, priv->n_write);
    return_connection (priv, client);
}
/*
 * Create a Riak object backend.
 *
 * @write_policy must be "quorum" or "all"; any other value triggers
 * g_return_val_if_reached and returns NULL (NOTE(review): in that case
 * bend/priv and the duplicated strings are leaked — confirm whether the
 * caller treats this as fatal).
 *
 * NOTE(review): the function pointers assigned below take fewer
 * parameters than the corresponding ObjBackend vtable slots declared in
 * obj-backend.h (no repo_id/version) — this code is compiled only under
 * RIAK_BACKEND and appears stale; confirm before enabling.
 */
ObjBackend *
obj_backend_riak_new (const char *host,
                      const char *port,
                      const char *bucket,
                      const char *write_policy)
{
    ObjBackend *bend;
    RiakPriv *priv;

    bend = g_new0(ObjBackend, 1);
    priv = g_new0(RiakPriv, 1);
    bend->priv = priv;

    priv->host = g_strdup (host);
    priv->port = g_strdup (port);
    priv->bucket = g_strdup (bucket);

    if (strcmp (write_policy, "quorum") == 0)
        priv->n_write = RIAK_QUORUM;
    else if (strcmp (write_policy, "all") == 0)
        priv->n_write = RIAK_ALL;
    else
        g_return_val_if_reached (NULL);

    priv->conn_pool = g_queue_new ();
    pthread_mutex_init (&priv->lock, NULL);

    bend->read = obj_backend_riak_read;
    bend->write = obj_backend_riak_write;
    bend->exists = obj_backend_riak_exists;
    bend->delete = obj_backend_riak_delete;

    return bend;
}
#else
/*
 * Stub used when seafile is compiled without RIAK_BACKEND: the Riak
 * backend is unavailable and creation always fails.
 */
ObjBackend *
obj_backend_riak_new (const char *host,
                      const char *port,
                      const char *bucket,
                      const char *write_policy)
{
    seaf_warning ("Riak backend is not enabled.\n");

    return NULL;
}
#endif /* RIAK_BACKEND */
================================================
FILE: common/obj-backend.h
================================================
#ifndef OBJ_BACKEND_H
#define OBJ_BACKEND_H

/* Fix: the glib include target was missing; gboolean below needs it. */
#include <glib.h>

#include "obj-store.h"

typedef struct ObjBackend ObjBackend;

/*
 * Vtable of operations a storage backend must provide.  All object
 * access goes through these function pointers; @priv holds
 * backend-specific state.
 */
struct ObjBackend {
    /* Read object @obj_id into a newly allocated *data buffer and set
     * *len.  Returns 0 on success, -1 on error. */
    int         (*read) (ObjBackend *bend,
                         const char *repo_id,
                         int version,
                         const char *obj_id,
                         void **data,
                         int *len);

    /* Write @len bytes of @data as object @obj_id; flush to stable
     * storage when @need_sync is TRUE. */
    int         (*write) (ObjBackend *bend,
                          const char *repo_id,
                          int version,
                          const char *obj_id,
                          void *data,
                          int len,
                          gboolean need_sync);

    /* Return TRUE when object @obj_id exists. */
    gboolean    (*exists) (ObjBackend *bend,
                           const char *repo_id,
                           int version,
                           const char *obj_id);

    /* Remove object @obj_id; errors are not reported. */
    void        (*delete) (ObjBackend *bend,
                           const char *repo_id,
                           int version,
                           const char *obj_id);

    /* Call @process for every object of @repo_id; iteration stops when
     * @process returns FALSE. */
    int         (*foreach_obj) (ObjBackend *bend,
                                const char *repo_id,
                                int version,
                                SeafObjFunc process,
                                void *user_data);

    /* Copy object @obj_id from the source repo to the destination repo. */
    int         (*copy) (ObjBackend *bend,
                         const char *src_repo_id,
                         int src_version,
                         const char *dst_repo_id,
                         int dst_version,
                         const char *obj_id);

    /* Delete all objects belonging to @store_id. */
    int         (*remove_store) (ObjBackend *bend,
                                 const char *store_id);

    void *priv;
};

#endif
================================================
FILE: common/obj-cache.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#include "common.h"
#define DEBUG_FLAG SEAFILE_DEBUG_OTHER
#include "log.h"
#include "redis-cache.h"
#include "obj-cache.h"
#define DEFAULT_MEMCACHED_EXPIRY 24 * 3600
#define DEFAULT_MAX_CONNECTIONS 100
typedef struct CacheOption {
char *cache_provider;
char *redis_host;
char *redis_passwd;
int redis_port;
int redis_max_connections;
int redis_expiry;
} CacheOption;
/*
 * Release a CacheOption and the strings it owns; NULL is a no-op.
 */
static void
cache_option_free (CacheOption *option)
{
    if (option == NULL)
        return;

    g_free (option->cache_provider);
    g_free (option->redis_host);
    g_free (option->redis_passwd);
    g_free (option);
}
/*
 * Override @option's fields from environment variables.
 *
 * When CACHE_PROVIDER is unset or empty the environment is ignored
 * entirely; otherwise each non-empty REDIS_* variable replaces the
 * corresponding field.
 */
static void
load_cache_option_from_env (CacheOption *option)
{
    const char *provider = g_getenv ("CACHE_PROVIDER");
    const char *host = g_getenv ("REDIS_HOST");
    const char *port = g_getenv ("REDIS_PORT");
    const char *passwd = g_getenv ("REDIS_PASSWORD");
    const char *max_conn = g_getenv ("REDIS_MAX_CONNECTIONS");
    const char *expiry = g_getenv ("REDIS_EXPIRY");

    if (!provider || g_strcmp0 (provider, "") == 0)
        return;

    g_free (option->cache_provider);
    option->cache_provider = g_strdup (provider);

    if (host && g_strcmp0 (host, "") != 0) {
        g_free (option->redis_host);
        option->redis_host = g_strdup (host);
    }

    if (port && g_strcmp0 (port, "") != 0)
        option->redis_port = atoi (port);

    if (passwd && g_strcmp0 (passwd, "") != 0) {
        g_free (option->redis_passwd);
        option->redis_passwd = g_strdup (passwd);
    }

    if (max_conn && g_strcmp0 (max_conn, "") != 0)
        option->redis_max_connections = atoi (max_conn);

    if (expiry && g_strcmp0 (expiry, "") != 0)
        option->redis_expiry = atoi (expiry);
}
/*
 * Create an object cache according to the environment configuration.
 *
 * Only the "redis" provider is currently supported.  Returns NULL when
 * no provider is configured, the provider is unknown, or backend
 * creation fails.  @config is currently unused.
 */
ObjCache *
objcache_new (GKeyFile *config)
{
    ObjCache *cache = NULL;
    CacheOption *option = g_new0 (CacheOption, 1);

    /* Compiled-in defaults; the environment may override them. */
    option->redis_port = 6379;
    option->redis_max_connections = DEFAULT_MAX_CONNECTIONS;
    option->redis_expiry = DEFAULT_MEMCACHED_EXPIRY;

    load_cache_option_from_env (option);

    if (g_strcmp0 (option->cache_provider, "redis") == 0) {
        cache = redis_cache_new (option->redis_host, option->redis_passwd,
                                 option->redis_port, option->redis_expiry,
                                 option->redis_max_connections);
    } else if (option->cache_provider) {
        seaf_warning ("Unsupported cache provider: %s\n", option->cache_provider);
    }

    cache_option_free (option);

    return cache;
}
/*
 * Fetch the cached object stored under @obj_id; stores its size in
 * *len.  Thin dispatch to the configured backend.
 */
void *
objcache_get_object (ObjCache *cache, const char *obj_id, size_t *len)
{
    void *object = cache->get_object (cache, obj_id, len);
    return object;
}
/*
 * Store @len bytes of @object under @obj_id with the given expiry
 * (seconds).  Thin dispatch to the configured backend.
 */
int
objcache_set_object (ObjCache *cache,
                     const char *obj_id,
                     const void *object,
                     int len,
                     int expiry)
{
    int status = cache->set_object (cache, obj_id, object, len, expiry);
    return status;
}
/*
 * Return TRUE when @obj_id is present in the cache.  Thin dispatch to
 * the configured backend.
 */
gboolean
objcache_test_object (ObjCache *cache, const char *obj_id)
{
    gboolean present = cache->test_object (cache, obj_id);
    return present;
}
/*
 * Remove @obj_id from the cache.  Thin dispatch to the configured
 * backend.
 */
int
objcache_delete_object (ObjCache *cache, const char *obj_id)
{
    int status = cache->delete_object (cache, obj_id);
    return status;
}
/*
 * Record an existence flag for @obj_id under the key
 * "<existence_prefix><obj_id>".  @val is stored as its decimal string
 * (including the NUL terminator).  Returns the backend's status, or -1
 * on formatting failure.
 *
 * Fix: the old 8-byte buffer combined with passing snprintf's would-be
 * length (n+1) to set_object could read past the buffer for values of 8
 * or more digits; the buffer now fits any int and truncation is
 * rejected explicitly.
 */
int
objcache_set_object_existence (ObjCache *cache, const char *obj_id, int val, int expiry, const char *existence_prefix)
{
    char *key;
    char buf[16];               /* fits any 32-bit int plus sign and NUL */
    int n;
    int ret;

    key = g_strdup_printf ("%s%s", existence_prefix, obj_id);
    n = snprintf (buf, sizeof(buf), "%d", val);
    if (n < 0 || n >= (int)sizeof(buf)) {
        /* Never pass a length beyond the end of buf to the backend. */
        g_free (key);
        return -1;
    }
    ret = cache->set_object (cache, key, buf, n+1, expiry);
    g_free (key);
    return ret;
}
/*
 * Read back the existence flag stored for @obj_id under
 * "<existence_prefix><obj_id>".  On success *val_out receives the
 * parsed integer and 0 is returned; -1 when the key is absent.
 *
 * NOTE(review): the value is parsed with atoi(); this assumes the
 * stored bytes include a NUL terminator, as written by
 * objcache_set_object_existence — confirm for other writers.
 */
int
objcache_get_object_existence (ObjCache *cache, const char *obj_id, int *val_out, const char *existence_prefix)
{
    char *key = g_strdup_printf ("%s%s", existence_prefix, obj_id);
    size_t len;
    char *stored;
    int ret = 0;

    stored = cache->get_object (cache, key, &len);
    if (stored == NULL)
        ret = -1;
    else
        *val_out = atoi (stored);

    g_free (key);
    g_free (stored);
    return ret;
}
/*
 * Remove the existence flag stored for @obj_id under
 * "<existence_prefix><obj_id>".  Returns the backend's status.
 */
int
objcache_delete_object_existence (ObjCache *cache, const char *obj_id, const char *existence_prefix)
{
    char *key = g_strdup_printf ("%s%s", existence_prefix, obj_id);
    int status;

    status = cache->delete_object (cache, key);
    g_free (key);

    return status;
}
/*
 * Publish @msg on @channel.  Thin dispatch to the configured backend.
 */
int
objcache_publish (ObjCache *cache, const char *channel, const char *msg)
{
    return cache->publish (cache, channel, msg);
}
/*
 * Append @msg to @list.  Thin dispatch to the configured backend.
 */
int
objcache_push (ObjCache *cache, const char *list, const char *msg)
{
    return cache->push (cache, list, msg);
}
================================================
FILE: common/obj-cache.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef OBJ_CACHE_H
#define OBJ_CACHE_H

#define DEFAULT_MEMCACHED_EXPIRY 24 * 3600
#define TYPE_REDIS 0x02

typedef struct ObjCache ObjCache;

/* Pluggable key/value object cache; the function pointers form the
 * backend vtable (currently implemented by the redis backend). */
struct ObjCache {
    /* Fetch the value of @obj_id into a new buffer; set *len. */
    void* (*get_object) (ObjCache *cache,
                         const char *obj_id,
                         size_t *len);
    /* Store @len bytes under @obj_id with @expiry seconds. */
    int (*set_object) (ObjCache *cache,
                       const char *obj_id,
                       const void *object,
                       int len,
                       int expiry);
    /* TRUE when @obj_id is present. */
    gboolean (*test_object) (ObjCache *cache,
                             const char *obj_id);
    int (*delete_object) (ObjCache *cache,
                          const char *obj_id);
    /* Pub/sub and list-push primitives. */
    int (*publish) (ObjCache *cache,
                    const char *channel,
                    const char *msg);
    int (*push) (ObjCache *cache,
                 const char *list,
                 const char *msg);
    int mc_expiry;      /* default expiry, seconds */
    char *host;
    int port;
    char cache_type;    /* e.g. TYPE_REDIS */
    void *priv;         /* backend-specific state */
};

/* NOTE(review): declared with empty parentheses (unspecified
 * parameters) while the definition in obj-cache.c takes a
 * GKeyFile *config — consider declaring the parameter here once
 * GKeyFile is in scope for all includers; confirm. */
ObjCache *
objcache_new ();

void *
objcache_get_object (struct ObjCache *cache, const char *obj_id, size_t *len);

int
objcache_set_object (struct ObjCache *cache,
                     const char *obj_id,
                     const void *object,
                     int len,
                     int expiry);

gboolean
objcache_test_object (struct ObjCache *cache, const char *obj_id);

int
objcache_delete_object (struct ObjCache *cache, const char *obj_id);

/* Existence flags are stored under "<existence_prefix><obj_id>". */
int
objcache_set_object_existence (struct ObjCache *cache, const char *obj_id, int val, int expiry, const char *existence_prefix);

int
objcache_get_object_existence (struct ObjCache *cache, const char *obj_id, int *val_out, const char *existence_prefix);

int
objcache_delete_object_existence (struct ObjCache *cache, const char *obj_id, const char *existence_prefix);

int
objcache_publish (ObjCache *cache, const char *channel, const char *msg);

int
objcache_push (ObjCache *cache, const char *list, const char *msg);

#endif
================================================
FILE: common/obj-store.c
================================================
#include "common.h"
#include "log.h"
#include "seafile-session.h"
#include "utils.h"
#include "obj-backend.h"
#include "obj-store.h"
/* An object store is a thin wrapper that delegates every operation to a
 * storage backend (currently always the filesystem backend). */
struct SeafObjStore {
    ObjBackend *bend;   /* created by obj_backend_fs_new() */
};
typedef struct SeafObjStore SeafObjStore;
extern ObjBackend *
obj_backend_fs_new (const char *seaf_dir, const char *obj_type);
/* Create an object store for the given object type, backed by the
 * filesystem backend rooted at seaf->seaf_dir.
 * Returns NULL if the backend cannot be created. */
struct SeafObjStore *
seaf_obj_store_new (SeafileSession *seaf, const char *obj_type)
{
    /* g_new0() aborts the process on allocation failure and never
     * returns NULL, so the old "if (!store) return NULL;" check was
     * dead code and has been removed. */
    SeafObjStore *store = g_new0 (SeafObjStore, 1);

    store->bend = obj_backend_fs_new (seaf->seaf_dir, obj_type);
    if (!store->bend) {
        seaf_warning ("[Object store] Failed to load backend.\n");
        g_free (store);
        return NULL;
    }

    return store;
}
/* No-op initialization hook, kept for interface compatibility.
 * Always returns 0. */
int
seaf_obj_store_init (SeafObjStore *obj_store)
{
    return 0;
}
/* Read object obj_id of the given repo/version into a newly allocated
 * buffer (*data, *len).  Returns the backend's status, or -1 if either
 * id is missing or malformed. */
int
seaf_obj_store_read_obj (struct SeafObjStore *obj_store,
                         const char *repo_id,
                         int version,
                         const char *obj_id,
                         void **data,
                         int *len)
{
    ObjBackend *bend = obj_store->bend;

    /* Reject malformed ids before touching the backend. */
    if (!repo_id || !is_uuid_valid (repo_id))
        return -1;
    if (!obj_id || !is_object_id_valid (obj_id))
        return -1;

    return bend->read (bend, repo_id, version, obj_id, data, len);
}
/* Write len bytes as object obj_id of the given repo/version.
 * need_sync is forwarded to the backend.  Returns the backend's status,
 * or -1 if either id is missing or malformed. */
int
seaf_obj_store_write_obj (struct SeafObjStore *obj_store,
                          const char *repo_id,
                          int version,
                          const char *obj_id,
                          void *data,
                          int len,
                          gboolean need_sync)
{
    ObjBackend *bend = obj_store->bend;

    /* Reject malformed ids before touching the backend. */
    if (!repo_id || !is_uuid_valid (repo_id))
        return -1;
    if (!obj_id || !is_object_id_valid (obj_id))
        return -1;

    return bend->write (bend, repo_id, version, obj_id, data, len, need_sync);
}
/* TRUE if object obj_id exists in the given repo/version.
 * Malformed or missing ids are treated as "does not exist". */
gboolean
seaf_obj_store_obj_exists (struct SeafObjStore *obj_store,
                           const char *repo_id,
                           int version,
                           const char *obj_id)
{
    ObjBackend *bend = obj_store->bend;

    if (!repo_id || !is_uuid_valid (repo_id))
        return FALSE;
    if (!obj_id || !is_object_id_valid (obj_id))
        return FALSE;

    return bend->exists (bend, repo_id, version, obj_id);
}
/* Delete object obj_id from the given repo/version.  Malformed ids are
 * silently ignored; deletion errors cannot be reported to callers. */
void
seaf_obj_store_delete_obj (struct SeafObjStore *obj_store,
                           const char *repo_id,
                           int version,
                           const char *obj_id)
{
    ObjBackend *bend = obj_store->bend;

    if (!repo_id || !is_uuid_valid(repo_id) ||
        !obj_id || !is_object_id_valid(obj_id))
        return;

    /* Fix: the old "return bend->delete (...);" returned an expression
     * from a void function, which is a C constraint violation.  Call the
     * backend and discard its result instead. */
    bend->delete (bend, repo_id, version, obj_id);
}
/* Invoke `process` for every object of the given repo/version.
 * Iteration order and early-stop semantics are defined by the backend. */
int
seaf_obj_store_foreach_obj (struct SeafObjStore *obj_store,
                            const char *repo_id,
                            int version,
                            SeafObjFunc process,
                            void *user_data)
{
    return obj_store->bend->foreach_obj (obj_store->bend, repo_id, version,
                                         process, user_data);
}
/* Copy object obj_id from one store/version to another.
 * Returns the backend's status, 0 for the empty object, -1 for a
 * missing or malformed obj_id. */
int
seaf_obj_store_copy_obj (struct SeafObjStore *obj_store,
                         const char *src_repo_id,
                         int src_version,
                         const char *dst_repo_id,
                         int dst_version,
                         const char *obj_id)
{
    ObjBackend *bend = obj_store->bend;

    /* Fix: validate obj_id the same way the other entry points do; the
     * old code passed a possibly-NULL obj_id straight into strcmp(),
     * which is undefined behavior. */
    if (!obj_id || !is_object_id_valid (obj_id))
        return -1;

    /* The all-zero id denotes the empty object; nothing to copy. */
    if (strcmp (obj_id, EMPTY_SHA1) == 0)
        return 0;

    return bend->copy (bend, src_repo_id, src_version, dst_repo_id, dst_version, obj_id);
}
/* Remove an entire object store identified by store_id; delegated to
 * the backend. */
int
seaf_obj_store_remove_store (struct SeafObjStore *obj_store,
                             const char *store_id)
{
    return obj_store->bend->remove_store (obj_store->bend, store_id);
}
================================================
FILE: common/obj-store.h
================================================
#ifndef OBJ_STORE_H
#define OBJ_STORE_H
#include
#include
struct _SeafileSession;
struct SeafObjStore;
struct SeafObjStore *
seaf_obj_store_new (struct _SeafileSession *seaf, const char *obj_type);
int
seaf_obj_store_init (struct SeafObjStore *obj_store);
/* Synchronous I/O interface. */
int
seaf_obj_store_read_obj (struct SeafObjStore *obj_store,
const char *repo_id,
int version,
const char *obj_id,
void **data,
int *len);
int
seaf_obj_store_write_obj (struct SeafObjStore *obj_store,
const char *repo_id,
int version,
const char *obj_id,
void *data,
int len,
gboolean need_sync);
gboolean
seaf_obj_store_obj_exists (struct SeafObjStore *obj_store,
const char *repo_id,
int version,
const char *obj_id);
void
seaf_obj_store_delete_obj (struct SeafObjStore *obj_store,
const char *repo_id,
int version,
const char *obj_id);
/* Callback invoked once per object by seaf_obj_store_foreach_obj().
 * NOTE(review): presumably returning FALSE stops the traversal early —
 * confirm against the backend's foreach implementation. */
typedef gboolean (*SeafObjFunc) (const char *repo_id,
                                 int version,
                                 const char *obj_id,
                                 void *user_data);
int
seaf_obj_store_foreach_obj (struct SeafObjStore *obj_store,
const char *repo_id,
int version,
SeafObjFunc process,
void *user_data);
int
seaf_obj_store_copy_obj (struct SeafObjStore *obj_store,
const char *src_store_id,
int src_version,
const char *dst_store_id,
int dst_version,
const char *obj_id);
int
seaf_obj_store_remove_store (struct SeafObjStore *obj_store,
const char *store_id);
#endif
================================================
FILE: common/object-list.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#include "common.h"
#include "object-list.h"
/* Allocate an empty ObjectList.
 * The hash table only borrows the id strings; ownership lives in
 * obj_ids, whose free func (g_free) releases them on destruction. */
ObjectList *
object_list_new ()
{
    ObjectList *list = g_new0 (ObjectList, 1);

    list->obj_hash = g_hash_table_new_full (g_str_hash, g_str_equal, NULL, NULL);
    list->obj_ids = g_ptr_array_new_with_free_func (g_free);

    return list;
}
/* Free an ObjectList, its hash table, the id array and the id strings. */
void
object_list_free (ObjectList *ol)
{
    if (ol->obj_hash != NULL)
        g_hash_table_destroy (ol->obj_hash);

    /* TRUE: also run the array's free func (g_free) on every stored id. */
    g_ptr_array_free (ol->obj_ids, TRUE);
    g_free (ol);
}
/* Serialize the list into a newly allocated flat buffer: each id is a
 * fixed 41-byte record (40 hex chars + NUL terminator).  The caller
 * owns *buffer; *len receives the total size in bytes. */
void
object_list_serialize (ObjectList *ol, uint8_t **buffer, uint32_t *len)
{
    const uint32_t id_size = 41;  /* 40-char hex object id + '\0' */
    uint32_t count = (uint32_t) object_list_length (ol);
    uint8_t *buf = g_new (uint8_t, id_size * count);
    uint32_t offset = 0;
    uint32_t i;

    /* Fix: the loop previously compared an unsigned index against a
     * signed int bound; both are now uint32_t. */
    for (i = 0; i < count; ++i) {
        memcpy (buf + offset, g_ptr_array_index (ol->obj_ids, i), id_size);
        offset += id_size;
    }

    *buffer = buf;
    *len = id_size * count;
}
/* Add object_id to the list.
 * Returns FALSE if it is already present, TRUE after inserting a copy. */
gboolean
object_list_insert (ObjectList *ol, const char *object_id)
{
    char *id_copy;

    if (g_hash_table_lookup (ol->obj_hash, object_id) != NULL)
        return FALSE;

    /* One shared copy: the hash table indexes it, the array owns it. */
    id_copy = g_strdup (object_id);
    g_hash_table_replace (ol->obj_hash, id_copy, id_copy);
    g_ptr_array_add (ol->obj_ids, id_copy);
    return TRUE;
}
================================================
FILE: common/object-list.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef OBJECT_LIST_H
#define OBJECT_LIST_H
#include
/* A de-duplicated, insertion-ordered collection of object id strings. */
typedef struct {
    GHashTable *obj_hash;   /* id -> id; O(1) membership test */
    GPtrArray *obj_ids;     /* insertion order; owns the id strings */
} ObjectList;
ObjectList *
object_list_new ();
void
object_list_free (ObjectList *ol);
void
object_list_serialize (ObjectList *ol, uint8_t **buffer, uint32_t *len);
/**
* Add object to ObjectList.
* Return FALSE if it is already in the list, TRUE otherwise.
*/
gboolean
object_list_insert (ObjectList *ol, const char *object_id);
/* TRUE if object_id has already been inserted into the list. */
inline static gboolean
object_list_exists (ObjectList *ol, const char *object_id)
{
    if (g_hash_table_lookup (ol->obj_hash, object_id))
        return TRUE;
    return FALSE;
}
/* Number of unique object ids currently stored. */
inline static int
object_list_length (ObjectList *ol)
{
    return ol->obj_ids->len;
}
#endif
================================================
FILE: common/org-mgr.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#include "common.h"
#include "seafile-session.h"
#include "seaf-db.h"
#include "org-mgr.h"
#include "seaf-utils.h"
#include "utils.h"
#include "log.h"
#define DEFAULT_MAX_CONNECTIONS 100
struct _CcnetOrgManagerPriv
{
    CcnetDB *db;    /* org database: private sqlite db, or the shared ccnet db */
};

/* Open the org database and, if configured, create its tables. */
static int open_db (CcnetOrgManager *manager);
/* Create the Organization/OrgUser/OrgGroup tables for the db's dialect. */
static int check_db_table (CcnetDB *db);
/* Allocate an org manager bound to the given session.  The database is
 * not opened here; call ccnet_org_manager_prepare() for that. */
CcnetOrgManager* ccnet_org_manager_new (SeafileSession *session)
{
    CcnetOrgManager *mgr = g_new0 (CcnetOrgManager, 1);

    mgr->session = session;
    mgr->priv = g_new0 (CcnetOrgManagerPriv, 1);

    return mgr;
}
/* No-op initialization hook, kept for interface compatibility.
 * Always returns 0. */
int
ccnet_org_manager_init (CcnetOrgManager *manager)
{
    return 0;
}
/* Open the org database (and create tables when configured).
 * Returns 0 on success, -1 on failure. */
int
ccnet_org_manager_prepare (CcnetOrgManager *manager)
{
    return open_db (manager);
}
/* Open (creating if needed) the private sqlite org database under
 * <ccnet_dir>/OrgMgr/orgmgr.db.  Returns NULL on failure. */
static CcnetDB *
open_sqlite_db (CcnetOrgManager *manager)
{
    CcnetDB *db;
    char *dir_path = g_build_filename (manager->session->ccnet_dir, "OrgMgr", NULL);

    if (checkdir_with_mkdir (dir_path) < 0) {
        ccnet_error ("Cannot open db dir %s: %s\n", dir_path,
                     strerror(errno));
        g_free (dir_path);
        return NULL;
    }
    g_free (dir_path);

    char *file_path = g_build_filename (manager->session->ccnet_dir, "OrgMgr",
                                        "orgmgr.db", NULL);
    db = seaf_db_new_sqlite (file_path, DEFAULT_MAX_CONNECTIONS);
    g_free (file_path);

    return db;
}
/* Select the org database: a private sqlite file for sqlite setups, or
 * the shared ccnet db for MySQL/PostgreSQL.  Optionally creates tables.
 * Returns 0 on success, -1 on failure. */
static int
open_db (CcnetOrgManager *manager)
{
    CcnetDB *db = NULL;

    switch (seaf_db_type(manager->session->ccnet_db)) {
    case SEAF_DB_TYPE_SQLITE:
        db = open_sqlite_db (manager);
        break;
    case SEAF_DB_TYPE_PGSQL:
    case SEAF_DB_TYPE_MYSQL:
        /* Reuse the session's connection; nothing extra to open. */
        db = manager->session->ccnet_db;
        break;
    }
    if (!db)
        return -1;

    manager->priv->db = db;

    /* PostgreSQL always goes through table creation; other backends only
     * when create_tables is configured. */
    if ((manager->session->create_tables || seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)
        && check_db_table (db) < 0) {
        ccnet_warning ("Failed to create org db tables.\n");
        return -1;
    }
    return 0;
}
/* No-op start hook, kept for interface compatibility. */
void ccnet_org_manager_start (CcnetOrgManager *manager)
{
}
/* -------- Group Database Management ---------------- */
/* Create the Organization, OrgUser and OrgGroup tables (and their
 * indexes) using the SQL dialect of the connected database.  All
 * statements are idempotent (IF NOT EXISTS).  Returns 0 on success,
 * -1 on the first failed statement. */
static int check_db_table (CcnetDB *db)
{
    char *sql;
    int db_type = seaf_db_type (db);

    if (db_type == SEAF_DB_TYPE_MYSQL) {
        /* MySQL: AUTO_INCREMENT ids, inline INDEX/UNIQUE INDEX clauses. */
        sql = "CREATE TABLE IF NOT EXISTS Organization (org_id BIGINT"
            " PRIMARY KEY AUTO_INCREMENT, org_name VARCHAR(255),"
            " url_prefix VARCHAR(255), creator VARCHAR(255), ctime BIGINT,"
            " UNIQUE INDEX (url_prefix))"
            "ENGINE=INNODB";
        if (seaf_db_query (db, sql) < 0)
            return -1;
        sql = "CREATE TABLE IF NOT EXISTS OrgUser ( "
            "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, org_id INTEGER, "
            "email VARCHAR(255), is_staff BOOL NOT NULL, "
            "INDEX (email), UNIQUE INDEX(org_id, email))"
            "ENGINE=INNODB";
        if (seaf_db_query (db, sql) < 0)
            return -1;
        sql = "CREATE TABLE IF NOT EXISTS OrgGroup ("
            "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, org_id INTEGER, "
            "group_id INTEGER, INDEX (group_id), "
            "UNIQUE INDEX(org_id, group_id))"
            "ENGINE=INNODB";
        if (seaf_db_query (db, sql) < 0)
            return -1;
    } else if (db_type == SEAF_DB_TYPE_SQLITE) {
        /* SQLite: AUTOINCREMENT ids, indexes created as separate
         * statements. */
        sql = "CREATE TABLE IF NOT EXISTS Organization (org_id INTEGER"
            " PRIMARY KEY AUTOINCREMENT, org_name VARCHAR(255),"
            " url_prefix VARCHAR(255), "
            " creator VARCHAR(255), ctime BIGINT)";
        if (seaf_db_query (db, sql) < 0)
            return -1;
        sql = "CREATE UNIQUE INDEX IF NOT EXISTS url_prefix_indx on "
            "Organization (url_prefix)";
        if (seaf_db_query (db, sql) < 0)
            return -1;
        sql = "CREATE TABLE IF NOT EXISTS OrgUser (org_id INTEGER, "
            "email TEXT, is_staff bool NOT NULL)";
        if (seaf_db_query (db, sql) < 0)
            return -1;
        sql = "CREATE INDEX IF NOT EXISTS email_indx on "
            "OrgUser (email)";
        if (seaf_db_query (db, sql) < 0)
            return -1;
        sql = "CREATE UNIQUE INDEX IF NOT EXISTS orgid_email_indx on "
            "OrgUser (org_id, email)";
        if (seaf_db_query (db, sql) < 0)
            return -1;
        sql = "CREATE TABLE IF NOT EXISTS OrgGroup (org_id INTEGER, "
            "group_id INTEGER)";
        if (seaf_db_query (db, sql) < 0)
            return -1;
        sql = "CREATE INDEX IF NOT EXISTS groupid_indx on OrgGroup (group_id)";
        if (seaf_db_query (db, sql) < 0)
            return -1;
        sql = "CREATE UNIQUE INDEX IF NOT EXISTS org_group_indx on "
            "OrgGroup (org_id, group_id)";
        if (seaf_db_query (db, sql) < 0)
            return -1;
    } else if (db_type == SEAF_DB_TYPE_PGSQL) {
        /* PostgreSQL: SERIAL ids, UNIQUE constraints inline.  The extra
         * non-unique indexes are intentionally disabled (see below). */
        sql = "CREATE TABLE IF NOT EXISTS Organization (org_id SERIAL"
            " PRIMARY KEY, org_name VARCHAR(255),"
            " url_prefix VARCHAR(255), creator VARCHAR(255), ctime BIGINT,"
            " UNIQUE (url_prefix))";
        if (seaf_db_query (db, sql) < 0)
            return -1;
        sql = "CREATE TABLE IF NOT EXISTS OrgUser (org_id INTEGER, "
            "email VARCHAR(255), is_staff INTEGER NOT NULL, "
            "UNIQUE (org_id, email))";
        if (seaf_db_query (db, sql) < 0)
            return -1;
        //if (!pgsql_index_exists (db, "orguser_email_idx")) {
        //    sql = "CREATE INDEX orguser_email_idx ON OrgUser (email)";
        //    if (seaf_db_query (db, sql) < 0)
        //        return -1;
        //}
        sql = "CREATE TABLE IF NOT EXISTS OrgGroup (org_id INTEGER, "
            "group_id INTEGER, "
            "UNIQUE (org_id, group_id))";
        if (seaf_db_query (db, sql) < 0)
            return -1;
        //if (!pgsql_index_exists (db, "orggroup_groupid_idx")) {
        //    sql = "CREATE INDEX orggroup_groupid_idx ON OrgGroup (group_id)";
        //    if (seaf_db_query (db, sql) < 0)
        //        return -1;
        //}
    }
    return 0;
}
/* Create an organization and make `creator` its first (staff) member.
 * Returns the new org id on success, -1 on failure (with *error set).
 * The org id is recovered by re-querying on url_prefix, which the
 * table's unique index guarantees to be unambiguous. */
int ccnet_org_manager_create_org (CcnetOrgManager *mgr,
                                  const char *org_name,
                                  const char *url_prefix,
                                  const char *creator,
                                  GError **error)
{
    CcnetDB *db = mgr->priv->db;
    gint64 now = get_current_time();
    int rc;

    rc = seaf_db_statement_query (db,
                                  "INSERT INTO Organization(org_name, url_prefix,"
                                  " creator, ctime) VALUES (?, ?, ?, ?)",
                                  4, "string", org_name, "string", url_prefix,
                                  "string", creator, "int64", now);
    if (rc < 0) {
        g_set_error (error, CCNET_DOMAIN, 0, "Failed to create organization");
        return -1;
    }

    /* Fetch the auto-generated id of the row just inserted. */
    int org_id = seaf_db_statement_get_int (db,
                                            "SELECT org_id FROM Organization WHERE "
                                            "url_prefix = ?", 1, "string", url_prefix);
    if (org_id < 0) {
        g_set_error (error, CCNET_DOMAIN, 0, "Failed to create organization");
        return -1;
    }

    rc = seaf_db_statement_query (db, "INSERT INTO OrgUser (org_id, email, is_staff) values (?, ?, ?)",
                                  3, "int", org_id, "string", creator, "int", 1);
    if (rc < 0) {
        /* Roll back the org record so a failed member insert does not
         * leave an empty organization behind. */
        seaf_db_statement_query (db, "DELETE FROM Organization WHERE org_id=?",
                                 1, "int", org_id);
        g_set_error (error, CCNET_DOMAIN, 0, "Failed to create organization");
        return -1;
    }
    return org_id;
}
/* Delete an organization together with its user and group memberships.
 * Individual delete failures are ignored; always returns 0. */
int
ccnet_org_manager_remove_org (CcnetOrgManager *mgr,
                              int org_id,
                              GError **error)
{
    CcnetDB *org_db = mgr->priv->db;

    seaf_db_statement_query (org_db, "DELETE FROM Organization WHERE org_id = ?",
                             1, "int", org_id);
    seaf_db_statement_query (org_db, "DELETE FROM OrgUser WHERE org_id = ?",
                             1, "int", org_id);
    seaf_db_statement_query (org_db, "DELETE FROM OrgGroup WHERE org_id = ?",
                             1, "int", org_id);

    return 0;
}
/* Row callback: build a CcnetOrganization from one Organization row and
 * prepend it to the result list.  Column order matches
 * "SELECT * FROM Organization". */
static gboolean
get_all_orgs_cb (CcnetDBRow *row, void *data)
{
    GList **orgs = data;

    CcnetOrganization *org =
        g_object_new (CCNET_TYPE_ORGANIZATION,
                      "org_id", seaf_db_row_get_column_int (row, 0),
                      "org_name", seaf_db_row_get_column_text (row, 1),
                      "url_prefix", seaf_db_row_get_column_text (row, 2),
                      "creator", seaf_db_row_get_column_text (row, 3),
                      "ctime", seaf_db_row_get_column_int64 (row, 4),
                      NULL);

    *orgs = g_list_prepend (*orgs, org);
    return TRUE;    /* keep iterating */
}
/* List organizations ordered by org id.  start == -1 && limit == -1
 * means "all"; otherwise the usual LIMIT/OFFSET paging applies.
 * Returns a list of CcnetOrganization objects (caller owns the list and
 * the objects), or NULL on error. */
GList *
ccnet_org_manager_get_all_orgs (CcnetOrgManager *mgr,
                                int start,
                                int limit)
{
    CcnetDB *db = mgr->priv->db;
    char *sql;
    GList *ret = NULL;
    int rc;

    if (start == -1 && limit == -1) {
        sql = "SELECT * FROM Organization ORDER BY org_id";
        rc = seaf_db_statement_foreach_row (db, sql, get_all_orgs_cb, &ret, 0);
    } else {
        sql = "SELECT * FROM Organization ORDER BY org_id LIMIT ? OFFSET ?";
        rc = seaf_db_statement_foreach_row (db, sql, get_all_orgs_cb, &ret,
                                            2, "int", limit, "int", start);
    }

    if (rc < 0) {
        /* Fix: release any rows collected before the failure instead of
         * leaking the partially built list and its GObjects. */
        g_list_free_full (ret, g_object_unref);
        return NULL;
    }

    /* Rows were prepended; restore query order. */
    return g_list_reverse (ret);
}
/* Total number of organizations, or -1 on database error. */
int
ccnet_org_manager_count_orgs (CcnetOrgManager *mgr)
{
    gint64 count = seaf_db_get_int64 (mgr->priv->db,
                                      "SELECT count(*) FROM Organization");

    return (count < 0) ? -1 : (int)count;
}
/* Row callback: materialize a single CcnetOrganization into *data.
 * Column order matches the explicit SELECT lists of the callers. */
static gboolean
get_org_cb (CcnetDBRow *row, void *data)
{
    CcnetOrganization **p_org = data;

    *p_org = g_object_new (CCNET_TYPE_ORGANIZATION,
                           "org_id", seaf_db_row_get_column_int (row, 0),
                           "org_name", seaf_db_row_get_column_text (row, 1),
                           "url_prefix", seaf_db_row_get_column_text (row, 2),
                           "creator", seaf_db_row_get_column_text (row, 3),
                           "ctime", seaf_db_row_get_column_int64 (row, 4),
                           NULL);

    return FALSE;   /* one row expected; stop after the first */
}
/* Look up an organization by its unique url_prefix.
 * Returns a new CcnetOrganization (caller unrefs), or NULL if not found
 * or on database error. */
CcnetOrganization *
ccnet_org_manager_get_org_by_url_prefix (CcnetOrgManager *mgr,
                                         const char *url_prefix,
                                         GError **error)
{
    CcnetOrganization *org = NULL;
    char *sql = "SELECT org_id, org_name, url_prefix, creator,"
        " ctime FROM Organization WHERE url_prefix = ?";

    if (seaf_db_statement_foreach_row (mgr->priv->db, sql, get_org_cb, &org,
                                       1, "string", url_prefix) < 0)
        return NULL;

    return org;
}
/* Look up an organization by its numeric id.
 * Returns a new CcnetOrganization (caller unrefs), or NULL if not found
 * or on database error. */
CcnetOrganization *
ccnet_org_manager_get_org_by_id (CcnetOrgManager *mgr,
                                 int org_id,
                                 GError **error)
{
    CcnetOrganization *org = NULL;
    char *sql = "SELECT org_id, org_name, url_prefix, creator,"
        " ctime FROM Organization WHERE org_id = ?";

    if (seaf_db_statement_foreach_row (mgr->priv->db, sql, get_org_cb, &org,
                                       1, "int", org_id) < 0)
        return NULL;

    return org;
}
/* Add a member to an org.  Uniqueness of (org_id, email) is enforced by
 * the table's unique index.  Returns the statement's status. */
int
ccnet_org_manager_add_org_user (CcnetOrgManager *mgr,
                                int org_id,
                                const char *email,
                                int is_staff,
                                GError **error)
{
    CcnetDB *org_db = mgr->priv->db;

    return seaf_db_statement_query (org_db, "INSERT INTO OrgUser (org_id, email, is_staff) values (?, ?, ?)",
                                    3, "int", org_id, "string", email,
                                    "int", is_staff);
}
/* Remove a member from an org.  Returns the statement's status. */
int
ccnet_org_manager_remove_org_user (CcnetOrgManager *mgr,
                                   int org_id,
                                   const char *email,
                                   GError **error)
{
    CcnetDB *org_db = mgr->priv->db;

    return seaf_db_statement_query (org_db, "DELETE FROM OrgUser WHERE org_id=? AND "
                                    "email=?", 2, "int", org_id, "string", email);
}
/* Row callback: build a CcnetOrganization (including the member's email
 * and staff flag) from a joined OrgUser/Organization row and prepend it
 * to the result list. */
static gboolean
get_orgs_by_user_cb (CcnetDBRow *row, void *data)
{
    GList **orgs = (GList **)data;
    CcnetOrganization *org;

    org = g_object_new (CCNET_TYPE_ORGANIZATION,
                        "org_id", seaf_db_row_get_column_int (row, 0),
                        "email", seaf_db_row_get_column_text (row, 1),
                        "is_staff", seaf_db_row_get_column_int (row, 2),
                        "org_name", seaf_db_row_get_column_text (row, 3),
                        "url_prefix", seaf_db_row_get_column_text (row, 4),
                        "creator", seaf_db_row_get_column_text (row, 5),
                        "ctime", seaf_db_row_get_column_int64 (row, 6),
                        NULL);

    *orgs = g_list_prepend (*orgs, org);
    return TRUE;    /* keep iterating */
}
/* List every organization the given email belongs to.
 * Returns a list of CcnetOrganization objects, or NULL on error. */
GList *
ccnet_org_manager_get_orgs_by_user (CcnetOrgManager *mgr,
                                    const char *email,
                                    GError **error)
{
    GList *orgs = NULL;
    char *sql = "SELECT t1.org_id, email, is_staff, org_name,"
        " url_prefix, creator, ctime FROM OrgUser t1, Organization t2"
        " WHERE t1.org_id = t2.org_id AND email = ?";

    if (seaf_db_statement_foreach_row (mgr->priv->db, sql, get_orgs_by_user_cb,
                                       &orgs, 1, "string", email) < 0) {
        g_list_free (orgs);
        return NULL;
    }

    /* Rows were prepended; restore query order. */
    return g_list_reverse (orgs);
}
/* Row callback: copy the email column and prepend it to the list. */
static gboolean
get_org_emailusers (CcnetDBRow *row, void *data)
{
    GList **emails = (GList **)data;

    *emails = g_list_prepend (*emails,
                              g_strdup (seaf_db_row_get_column_text (row, 0)));
    return TRUE;
}
/* List member emails of the org identified by url_prefix, sorted.
 * start == -1 && limit == -1 means "all"; otherwise LIMIT/OFFSET paging.
 * Returns a list of newly allocated strings, or NULL on error. */
GList *
ccnet_org_manager_get_org_emailusers (CcnetOrgManager *mgr,
                                      const char *url_prefix,
                                      int start, int limit)
{
    CcnetDB *db = mgr->priv->db;
    GList *emails = NULL;
    int rc;

    if (start == -1 && limit == -1) {
        rc = seaf_db_statement_foreach_row (db,
                                            "SELECT u.email FROM OrgUser u, Organization o "
                                            "WHERE u.org_id = o.org_id AND "
                                            "o.url_prefix = ? "
                                            "ORDER BY email",
                                            get_org_emailusers, &emails,
                                            1, "string", url_prefix);
    } else {
        rc = seaf_db_statement_foreach_row (db,
                                            "SELECT u.email FROM OrgUser u, Organization o "
                                            "WHERE u.org_id = o.org_id AND "
                                            "o.url_prefix = ? "
                                            " ORDER BY email LIMIT ? OFFSET ?",
                                            get_org_emailusers, &emails,
                                            3, "string", url_prefix,
                                            "int", limit, "int", start);
    }

    if (rc < 0)
        return NULL;

    return g_list_reverse (emails);
}
/* Associate a group with an org.  Returns the statement's status. */
int
ccnet_org_manager_add_org_group (CcnetOrgManager *mgr,
                                 int org_id,
                                 int group_id,
                                 GError **error)
{
    return seaf_db_statement_query (mgr->priv->db,
                                    "INSERT INTO OrgGroup (org_id, group_id) VALUES (?, ?)",
                                    2, "int", org_id, "int", group_id);
}
/* Dissociate a group from an org.  Returns the statement's status. */
int
ccnet_org_manager_remove_org_group (CcnetOrgManager *mgr,
                                    int org_id,
                                    int group_id,
                                    GError **error)
{
    CcnetDB *db = mgr->priv->db;

    /* Fix: group_id is an int but was previously bound with the varargs
     * type tag "string", making the DB layer read a char* where an int
     * had been pushed (undefined behavior / wrong binding). */
    return seaf_db_statement_query (db, "DELETE FROM OrgGroup WHERE org_id=?"
                                    " AND group_id=?",
                                    2, "int", org_id, "int", group_id);
}
/* Return non-zero if the group belongs to any organization; 0 if not,
 * or on database error (a warning is logged in that case). */
int
ccnet_org_manager_is_org_group (CcnetOrgManager *mgr,
                                int group_id,
                                GError **error)
{
    gboolean db_err;
    gboolean found;

    found = seaf_db_statement_exists (mgr->priv->db,
                                      "SELECT group_id FROM OrgGroup "
                                      "WHERE group_id = ?", &db_err,
                                      1, "int", group_id);
    if (db_err) {
        ccnet_warning ("DB error when check group exist in OrgGroup.\n");
        return 0;
    }

    return found;
}
/* Org id owning the given group.  NOTE(review): presumably returns a
 * negative value when the group is in no org — confirm against
 * seaf_db_statement_get_int's no-row semantics. */
int
ccnet_org_manager_get_org_id_by_group (CcnetOrgManager *mgr,
                                       int group_id,
                                       GError **error)
{
    return seaf_db_statement_get_int (mgr->priv->db,
                                      "SELECT org_id FROM OrgGroup WHERE group_id = ?",
                                      1, "int", group_id);
}
/* Row callback: pack the integer group id directly into the list's
 * pointer slot and prepend it. */
static gboolean
get_org_group_ids (CcnetDBRow *row, void *data)
{
    GList **ids = data;
    long gid = seaf_db_row_get_column_int (row, 0);

    *ids = g_list_prepend (*ids, (gpointer)gid);
    return TRUE;
}
/* List the group ids of an org.  limit == -1 means "all"; otherwise
 * LIMIT/OFFSET paging.  The ids are packed into the list's pointer
 * slots (see get_org_group_ids).  Returns NULL on error. */
GList *
ccnet_org_manager_get_org_group_ids (CcnetOrgManager *mgr,
                                     int org_id,
                                     int start,
                                     int limit)
{
    GList *ids = NULL;
    int rc;

    if (limit == -1) {
        rc = seaf_db_statement_foreach_row (mgr->priv->db,
                                            "SELECT group_id FROM OrgGroup WHERE "
                                            "org_id = ?",
                                            get_org_group_ids, &ids,
                                            1, "int", org_id);
    } else {
        rc = seaf_db_statement_foreach_row (mgr->priv->db,
                                            "SELECT group_id FROM OrgGroup WHERE "
                                            "org_id = ? LIMIT ? OFFSET ?",
                                            get_org_group_ids, &ids,
                                            3, "int", org_id, "int", limit,
                                            "int", start);
    }

    if (rc < 0) {
        g_list_free (ids);
        return NULL;
    }

    return g_list_reverse (ids);
}
/* Row callback: build a CcnetGroup from a row of
 * (group_id, group_name, creator_name, timestamp, parent_group_id)
 * and prepend it to the result list. */
static gboolean
get_org_groups (CcnetDBRow *row, void *data)
{
    GList **groups = data;

    CcnetGroup *group =
        g_object_new (CCNET_TYPE_GROUP,
                      "id", seaf_db_row_get_column_int (row, 0),
                      "group_name", seaf_db_row_get_column_text (row, 1),
                      "creator_name", seaf_db_row_get_column_text (row, 2),
                      "timestamp", seaf_db_row_get_column_int64 (row, 3),
                      "source", "DB",
                      "parent_group_id", seaf_db_row_get_column_int (row, 4),
                      NULL);

    *groups = g_list_prepend (*groups, group);
    return TRUE;
}
/* List an org's top-level groups (parent_group_id == -1), newest first.
 * Returns a list of CcnetGroup objects, or NULL on error. */
GList *
ccnet_org_manager_get_org_top_groups (CcnetOrgManager *mgr, int org_id, GError **error)
{
    GList *groups = NULL;
    char *sql = "SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM "
        "`OrgGroup` o, `Group` g WHERE o.group_id = g.group_id AND "
        "org_id=? AND parent_group_id=-1 ORDER BY timestamp DESC";

    if (seaf_db_statement_foreach_row (mgr->priv->db, sql,
                                       get_org_groups, &groups,
                                       1, "int", org_id) < 0)
        return NULL;

    return g_list_reverse (groups);
}
/* List all groups of an org.  limit == -1 means "all"; otherwise
 * LIMIT/OFFSET paging.  Returns a list of CcnetGroup objects, or NULL
 * on error. */
GList *
ccnet_org_manager_get_org_groups (CcnetOrgManager *mgr,
                                  int org_id,
                                  int start,
                                  int limit)
{
    GList *groups = NULL;
    int rc;

    if (limit == -1) {
        rc = seaf_db_statement_foreach_row (mgr->priv->db,
                                            "SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM "
                                            "OrgGroup o, `Group` g WHERE o.group_id = g.group_id AND org_id = ?",
                                            get_org_groups, &groups,
                                            1, "int", org_id);
    } else {
        rc = seaf_db_statement_foreach_row (mgr->priv->db,
                                            "SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM "
                                            "OrgGroup o, `Group` g WHERE o.group_id = g.group_id AND org_id = ? "
                                            "LIMIT ? OFFSET ?",
                                            get_org_groups, &groups,
                                            3, "int", org_id, "int", limit,
                                            "int", start);
    }

    if (rc < 0)
        return NULL;

    return g_list_reverse (groups);
}
/* List the org's groups that the given user is a member of.
 * Returns a list of CcnetGroup objects, or NULL on error. */
GList *
ccnet_org_manager_get_org_groups_by_user (CcnetOrgManager *mgr,
                                          const char *user,
                                          int org_id)
{
    CcnetDB *db = mgr->priv->db;
    char *sql;
    GList *ret = NULL;
    int rc;

    /* Fix: the shared get_org_groups callback reads five columns
     * (parent_group_id at index 4), but this query previously selected
     * only four, so the callback read a nonexistent column. */
    sql = "SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM "
        "OrgGroup o, `Group` g, GroupUser u "
        "WHERE o.group_id = g.group_id AND org_id = ? AND "
        "g.group_id = u.group_id AND user_name = ?";
    rc = seaf_db_statement_foreach_row (db,
                                        sql,
                                        get_org_groups, &ret,
                                        2, "int", org_id, "string", user);
    if (rc < 0)
        return NULL;

    return g_list_reverse (ret);
}
/* Return non-zero if email is a member of the org; 0 if not, or on
 * database error (a warning is logged in that case). */
int
ccnet_org_manager_org_user_exists (CcnetOrgManager *mgr,
                                   int org_id,
                                   const char *email,
                                   GError **error)
{
    gboolean db_err;
    gboolean found;

    found = seaf_db_statement_exists (mgr->priv->db,
                                      "SELECT org_id FROM OrgUser WHERE "
                                      "org_id = ? AND email = ?", &db_err,
                                      2, "int", org_id, "string", email);
    if (db_err) {
        ccnet_warning ("DB error when check user exist in OrgUser.\n");
        return 0;
    }

    return found;
}
/* url_prefix of the given org.  NOTE(review): presumably a newly
 * allocated string owned by the caller — confirm against
 * seaf_db_statement_get_string. */
char *
ccnet_org_manager_get_url_prefix_by_org_id (CcnetOrgManager *mgr,
                                            int org_id,
                                            GError **error)
{
    return seaf_db_statement_get_string (mgr->priv->db,
                                         "SELECT url_prefix FROM Organization WHERE org_id = ?",
                                         1, "int", org_id);
}
/* Staff flag of the given member (value of the is_staff column). */
int
ccnet_org_manager_is_org_staff (CcnetOrgManager *mgr,
                                int org_id,
                                const char *email,
                                GError **error)
{
    return seaf_db_statement_get_int (mgr->priv->db,
                                      "SELECT is_staff FROM OrgUser WHERE org_id=? AND email=?",
                                      2, "int", org_id, "string", email);
}
/* Mark the member as org staff.  Returns the statement's status. */
int
ccnet_org_manager_set_org_staff (CcnetOrgManager *mgr,
                                 int org_id,
                                 const char *email,
                                 GError **error)
{
    return seaf_db_statement_query (mgr->priv->db,
                                    "UPDATE OrgUser SET is_staff = 1 "
                                    "WHERE org_id=? AND email=?", 2,
                                    "int", org_id, "string", email);
}
/* Clear the member's org staff flag.  Returns the statement's status. */
int
ccnet_org_manager_unset_org_staff (CcnetOrgManager *mgr,
                                   int org_id,
                                   const char *email,
                                   GError **error)
{
    return seaf_db_statement_query (mgr->priv->db,
                                    "UPDATE OrgUser SET is_staff = 0 "
                                    "WHERE org_id=? AND email=?", 2,
                                    "int", org_id, "string", email);
}
/* Rename an organization.  Returns the statement's status. */
int
ccnet_org_manager_set_org_name(CcnetOrgManager *mgr,
                               int org_id,
                               const char *org_name,
                               GError **error)
{
    CcnetDB *db = mgr->priv->db;

    /* Fix: removed an unreachable "return 0;" that followed this
     * return statement. */
    return seaf_db_statement_query (db,
                                    "UPDATE `Organization` set org_name = ? "
                                    "WHERE org_id = ?",
                                    2, "string", org_name, "int", org_id);
}
================================================
FILE: common/org-mgr.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef _ORG_MGR_H_
#define _ORG_MGR_H_
typedef struct _SeafileSession SeafileSession;
typedef struct _CcnetOrgManager CcnetOrgManager;
typedef struct _CcnetOrgManagerPriv CcnetOrgManagerPriv;

/* Manager for "organization" records and their user/group memberships,
 * stored in the ccnet database. */
struct _CcnetOrgManager
{
    SeafileSession *session;    /* owning session; not owned here */
    CcnetOrgManagerPriv *priv;  /* private state (database handle) */
};
CcnetOrgManager* ccnet_org_manager_new (SeafileSession *session);
int
ccnet_org_manager_prepare (CcnetOrgManager *manager);
void
ccnet_org_manager_start (CcnetOrgManager *manager);
int
ccnet_org_manager_create_org (CcnetOrgManager *mgr,
const char *org_name,
const char *url_prefix,
const char *creator,
GError **error);
int
ccnet_org_manager_remove_org (CcnetOrgManager *mgr,
int org_id,
GError **error);
GList *
ccnet_org_manager_get_all_orgs (CcnetOrgManager *mgr,
int start,
int limit);
int
ccnet_org_manager_count_orgs (CcnetOrgManager *mgr);
CcnetOrganization *
ccnet_org_manager_get_org_by_url_prefix (CcnetOrgManager *mgr,
const char *url_prefix,
GError **error);
CcnetOrganization *
ccnet_org_manager_get_org_by_id (CcnetOrgManager *mgr,
int org_id,
GError **error);
int
ccnet_org_manager_add_org_user (CcnetOrgManager *mgr,
int org_id,
const char *email,
int is_staff,
GError **error);
int
ccnet_org_manager_remove_org_user (CcnetOrgManager *mgr,
int org_id,
const char *email,
GError **error);
GList *
ccnet_org_manager_get_orgs_by_user (CcnetOrgManager *mgr,
const char *email,
GError **error);
GList *
ccnet_org_manager_get_org_emailusers (CcnetOrgManager *mgr,
const char *url_prefix,
int start, int limit);
int
ccnet_org_manager_add_org_group (CcnetOrgManager *mgr,
int org_id,
int group_id,
GError **error);
int
ccnet_org_manager_remove_org_group (CcnetOrgManager *mgr,
int org_id,
int group_id,
GError **error);
int
ccnet_org_manager_is_org_group (CcnetOrgManager *mgr,
int group_id,
GError **error);
int
ccnet_org_manager_get_org_id_by_group (CcnetOrgManager *mgr,
int group_id,
GError **error);
GList *
ccnet_org_manager_get_org_group_ids (CcnetOrgManager *mgr,
int org_id,
int start,
int limit);
GList *
ccnet_org_manager_get_org_groups (CcnetOrgManager *mgr,
int org_id,
int start,
int limit);
GList *
ccnet_org_manager_get_org_groups_by_user (CcnetOrgManager *mgr,
const char *user,
int org_id);
GList *
ccnet_org_manager_get_org_top_groups (CcnetOrgManager *mgr, int org_id, GError **error);
int
ccnet_org_manager_org_user_exists (CcnetOrgManager *mgr,
int org_id,
const char *email,
GError **error);
char *
ccnet_org_manager_get_url_prefix_by_org_id (CcnetOrgManager *mgr,
int org_id,
GError **error);
int
ccnet_org_manager_is_org_staff (CcnetOrgManager *mgr,
int org_id,
const char *email,
GError **error);
int
ccnet_org_manager_set_org_staff (CcnetOrgManager *mgr,
int org_id,
const char *email,
GError **error);
int
ccnet_org_manager_unset_org_staff (CcnetOrgManager *mgr,
int org_id,
const char *email,
GError **error);
int
ccnet_org_manager_set_org_name(CcnetOrgManager *mgr,
int org_id,
const char *org_name,
GError **error);
#endif /* _ORG_MGR_H_ */
================================================
FILE: common/password-hash.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#include
#include
#include
#include "password-hash.h"
#include "seafile-crypt.h"
#include
#include "utils.h"
#include "log.h"
// pbkdf2
/* Parameters for PBKDF2-SHA256 key derivation. */
typedef struct Pbkdf2Params {
    int iteration;  /* PBKDF2 iteration count (default 1000) */
} Pbkdf2Params;
/* Parse a PBKDF2 parameter string (just the iteration count).
 * A missing or non-positive value falls back to 1000 iterations.
 * Caller frees the returned struct with g_free(). */
static Pbkdf2Params *
parse_pbkdf2_sha256_params (const char *params_str)
{
    Pbkdf2Params *params = g_new0 (Pbkdf2Params, 1);
    int iteration = 1000;   /* default */

    if (params_str) {
        int parsed = atoi (params_str);
        if (parsed > 0)
            iteration = parsed;
    }

    params->iteration = iteration;
    return params;
}
/* Derive a 32-byte key from data_in using PBKDF2-HMAC-SHA256 with the
 * given hex-encoded salt (decoded to 32 raw bytes) and iteration count.
 * Returns 0 on success, -1 if the derivation fails. */
static int
pbkdf2_sha256_derive_key (const char *data_in, int in_len,
                          const char *salt,
                          Pbkdf2Params *params,
                          unsigned char *key)
{
    int iteration = params->iteration;
    unsigned char salt_bin[32] = {0};

    hex_to_rawdata (salt, salt_bin, 32);

    /* Fix: PKCS5_PBKDF2_HMAC returns 1 on success and 0 on failure; the
     * failure case was previously ignored, silently leaving `key` in an
     * unspecified state. */
    if (PKCS5_PBKDF2_HMAC (data_in, in_len,
                           salt_bin, sizeof(salt_bin),
                           iteration,
                           EVP_sha256(),
                           32, key) != 1)
        return -1;

    return 0;
}
// argon2id
/* Parameters for argon2id key derivation. */
typedef struct Argon2idParams{
    gint64 time_cost;    /* number of passes (default 2) */
    gint64 memory_cost;  /* memory usage in KiB (default 102400 = 100 MiB) */
    gint64 parallelism;  /* number of threads and lanes (default 8) */
} Argon2idParams;
// The arguments to argon2 are separated by commas.
// Example arguments format:
// 2,102400,8
// The parameters are time_cost, memory_cost, parallelism from left to right.
/* Parse an argon2id parameter string of the form
 * "time_cost,memory_cost,parallelism" (e.g. "2,102400,8").
 * A missing or malformed string — or any non-positive field — falls
 * back to the defaults.  Caller frees the result with g_free(). */
static Argon2idParams *
parse_argon2id_params (const char *params_str)
{
    Argon2idParams *argon2_params = g_new0 (Argon2idParams, 1);
    char **fields = NULL;

    if (params_str)
        fields = g_strsplit (params_str, ",", 3);

    if (!fields || g_strv_length (fields) != 3) {
        if (fields)
            g_strfreev (fields);
        argon2_params->time_cost = 2;        /* 2-pass computation */
        argon2_params->memory_cost = 102400; /* 100 mebibytes memory usage */
        argon2_params->parallelism = 8;      /* number of threads and lanes */
        return argon2_params;
    }

    argon2_params->time_cost = atoll (g_strstrip (fields[0]));
    if (argon2_params->time_cost <= 0)
        argon2_params->time_cost = 2;

    argon2_params->memory_cost = atoll (g_strstrip (fields[1]));
    if (argon2_params->memory_cost <= 0)
        argon2_params->memory_cost = 102400;

    argon2_params->parallelism = atoll (g_strstrip (fields[2]));
    if (argon2_params->parallelism <= 0)
        argon2_params->parallelism = 8;

    g_strfreev (fields);
    return argon2_params;
}
/* Derive a 32-byte key from data_in using argon2id with the given
 * hex-encoded salt (decoded to 32 raw bytes) and cost parameters.
 * Returns 0 on success, -1 if the derivation fails. */
static int
argon2id_derive_key (const char *data_in, int in_len,
                     const char *salt,
                     Argon2idParams *params,
                     unsigned char *key)
{
    unsigned char salt_bin[32] = {0};

    hex_to_rawdata (salt, salt_bin, 32);

    /* Fix: argon2id_hash_raw returns ARGON2_OK (0) on success; a
     * failure was previously ignored, silently leaving `key` in an
     * unspecified state. */
    if (argon2id_hash_raw (params->time_cost, params->memory_cost, params->parallelism,
                           data_in, in_len,
                           salt_bin, sizeof(salt_bin),
                           key, 32) != 0)
        return -1;

    return 0;
}
// parse_pwd_hash_params is used to parse default pwd hash algorithms.
/*
 * Fill @params with the algorithm name and its parameter string,
 * substituting defaults when @params_str is NULL.  Unknown algorithms
 * leave both fields NULL.
 *
 * Fix: the else branch previously set only params->algo = NULL and then
 * logged both fields with %s — passing NULL (or an uninitialized
 * params_str) to printf-style %s is undefined behavior.  Explicitly
 * NULL both fields and guard the log message.
 */
void
parse_pwd_hash_params (const char *algo, const char *params_str, PwdHashParams *params)
{
    if (g_strcmp0 (algo, PWD_HASH_PDKDF2) == 0) {
        params->algo = g_strdup (PWD_HASH_PDKDF2);
        if (params_str)
            params->params_str = g_strdup (params_str);
        else
            params->params_str = g_strdup ("1000"); /* default iteration count */
    } else if (g_strcmp0 (algo, PWD_HASH_ARGON2ID) == 0) {
        params->algo = g_strdup (PWD_HASH_ARGON2ID);
        if (params_str)
            params->params_str = g_strdup (params_str);
        else
            params->params_str = g_strdup ("2,102400,8"); /* default argon2id costs */
    } else {
        params->algo = NULL;
        params->params_str = NULL;
    }
    seaf_message ("password hash algorithms: %s, params: %s\n ",
                  params->algo ? params->algo : "(none)",
                  params->params_str ? params->params_str : "(none)");
}
/*
 * Dispatch key derivation to the configured password-hash algorithm.
 * Anything other than argon2id falls back to PBKDF2-SHA256.
 * Returns the underlying derivation function's status.
 */
int
pwd_hash_derive_key (const char *data_in, int in_len,
                     const char *salt,
                     const char *algo, const char *params_str,
                     unsigned char *key)
{
    int status;

    if (g_strcmp0 (algo, PWD_HASH_ARGON2ID) == 0) {
        Argon2idParams *argon2_params = parse_argon2id_params (params_str);
        status = argon2id_derive_key (data_in, in_len, salt, argon2_params, key);
        g_free (argon2_params);
    } else {
        Pbkdf2Params *pbkdf2_params = parse_pbkdf2_sha256_params (params_str);
        status = pbkdf2_sha256_derive_key (data_in, in_len, salt, pbkdf2_params, key);
        g_free (pbkdf2_params);
    }

    return status;
}
================================================
FILE: common/password-hash.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef _PASSWORD_HASH_H
#define _PASSWORD_HASH_H

/* Supported password-hash algorithm identifiers.
 * NOTE(review): "PDKDF2" looks like a typo for "PBKDF2" but the macro
 * name is part of the API surface — renaming would break callers. */
#define PWD_HASH_PDKDF2 "pbkdf2_sha256"
#define PWD_HASH_ARGON2ID "argon2id"

/* Algorithm name plus its comma/number parameter string, both heap
 * allocated (caller frees). */
typedef struct _PwdHashParams {
    char *algo;
    char *params_str;
} PwdHashParams;

/* Resolve @algo/@params_str into @params, applying defaults; unknown
 * algorithms yield NULL fields. */
void
parse_pwd_hash_params (const char *algo, const char *params_str, PwdHashParams *params);

/* Derive a 32-byte key into @key using @algo; returns 0 on success. */
int
pwd_hash_derive_key (const char *data_in, int in_len,
                     const char *repo_salt,
                     const char *algo, const char *params_str,
                     unsigned char *key);

#endif
================================================
FILE: common/processors/objecttx-common.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef OBJECTTX_COMMON_H
#define OBJECTTX_COMMON_H

/* Protocol status codes (SC_*) and their human-readable strings (SS_*)
 * for the object-transfer processors.  3xx codes are normal protocol
 * steps, 4xx codes are errors. */
#define SC_GET_OBJECT "301"
#define SS_GET_OBJECT "Get Object"
#define SC_OBJECT "302"
#define SS_OBJECT "Object"
#define SC_END "303"
#define SS_END "END"
#define SC_COMMIT_IDS "304"
#define SS_COMMIT_IDS "Commit IDs"
#define SC_ACK "305"
#define SS_ACK "Ack"
#define SC_OBJ_SEG "306"
#define SS_OBJ_SEG "Object Segment"
#define SC_OBJ_SEG_END "307"
#define SS_OBJ_SEG_END "Object Segment End"
#define SC_OBJ_LIST_SEG "308"
#define SS_OBJ_LIST_SEG "Object List Segment"
#define SC_OBJ_LIST_SEG_END "309"
#define SS_OBJ_LIST_SEG_END "Object List Segment End"
#define SC_NOT_FOUND "401"
#define SS_NOT_FOUND "Object not found"
#define SC_BAD_OL "402"
#define SS_BAD_OL "Bad Object List"
#define SC_BAD_OBJECT "403"
#define SS_BAD_OBJECT "Bad Object"
#define SC_ACCESS_DENIED "410"
#define SS_ACCESS_DENIED "Access denied"

/* for fs transfer */
/* NOTE(review): SC_ROOT/SC_ROOT_END reuse codes "304"/"305" from the
 * commit-id protocol above — presumably safe because the two protocols
 * never run on the same channel; confirm before consolidating. */
#define SC_ROOT "304"
#define SS_ROOT "FS Root"
#define SC_ROOT_END "305"
#define SS_ROOT_END "FS Root End"

/* max fs object segment size */
#define MAX_OBJ_SEG_SIZE 64000

/* Wire format for one object: 40-char hex id + NUL, immediately
 * followed by the raw object bytes (zero-length trailing array). */
typedef struct {
    char id[41];
    uint8_t object[0];
} __attribute__((__packed__)) ObjectPack;

#endif
================================================
FILE: common/redis-cache.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#include "common.h"
#include <hiredis/hiredis.h>
#include "redis-cache.h"
#define DEBUG_FLAG SEAFILE_DEBUG_OTHER
#include "log.h"
/* A fixed-capacity pool of reusable redis connections.
 * max_connections == 0 disables pooling entirely. */
struct _RedisConnectionPool {
    char *host;
    int port;
    GPtrArray *connections;   /* RedisConnection*, guarded by lock */
    pthread_mutex_t lock;
    int max_connections;
};
typedef struct _RedisConnectionPool RedisConnectionPool;

/* One pooled connection. */
struct _RedisConnection {
    gboolean is_available;    /* TRUE when idle and reusable */
    redisContext *ac;
    gint64 ctime; /* Used to clean up unused connection. */
    gboolean release; /* If TRUE, the connection will be released. */
};
typedef struct _RedisConnection RedisConnection;

/* Per-ObjCache private state for the redis backend. */
typedef struct RedisPriv {
    RedisConnectionPool *redis_pool;
    char *passwd;             /* redis AUTH password, may be NULL */
} RedisPriv;
/*
 * Authenticate @conn with the redis AUTH command.
 * A NULL password means no authentication is configured.
 * Returns 0 on success, -1 on failure.
 */
static int
redis_auth (RedisConnection *conn, const char *passwd)
{
    redisReply *r;
    int status = 0;

    if (!passwd)
        return 0;

    r = redisCommand (conn->ac, "AUTH %s", passwd);
    if (!r) {
        seaf_warning ("Failed to auth redis server.\n");
        status = -1;
        goto out;
    }

    if (r->type != REDIS_REPLY_STATUS || g_strcmp0 (r->str, "OK") != 0) {
        if (r->type == REDIS_REPLY_ERROR)
            seaf_warning ("Failed to auth redis: %s.\n", r->str);
        status = -1;
    }

out:
    freeReplyObject (r);
    return status;
}
/*
 * Open and authenticate a new redis connection.
 * Returns NULL (with a warning logged) on any failure.
 */
static RedisConnection *
redis_connection_new (const char *host, const char *passwd, int port)
{
    RedisConnection *conn = g_new0 (RedisConnection, 1);

    conn->ac = redisConnect (host, port);
    if (!conn->ac) {
        seaf_warning ("Can't allocate redis context\n");
        g_free (conn);
        return NULL;
    }
    if (conn->ac->err) {
        seaf_warning ("Failed to connect to redis : %s\n", conn->ac->errstr);
        redisFree (conn->ac);
        g_free (conn);
        return NULL;
    }

    if (redis_auth (conn, passwd) < 0) {
        redisFree (conn->ac);
        g_free (conn);
        return NULL;
    }

    /* Record creation time for idle-connection cleanup. */
    conn->ctime = (gint64)time (NULL);
    return conn;
}
/* Close and free one connection; tolerates NULL. */
static void
redis_connection_free (RedisConnection *conn)
{
    if (conn == NULL)
        return;

    if (conn->ac != NULL)
        redisFree (conn->ac);
    g_free (conn);
}
/*
 * Allocate an empty connection pool for @host:@port.
 * @max_connections of 0 means "no pooling": connections are created
 * and destroyed per request.
 */
static RedisConnectionPool *
redis_connection_pool_new (const char *host, int port, int max_connections)
{
    RedisConnectionPool *pool = g_new0 (RedisConnectionPool, 1);

    pool->host = g_strdup (host);
    pool->port = port;
    pool->max_connections = max_connections;
    pool->connections = g_ptr_array_sized_new (max_connections);
    pthread_mutex_init (&pool->lock, NULL);

    return pool;
}
/*
 * Borrow a connection from the pool: reuse the first idle one, or open
 * a new one if the pool is below capacity.  Returns NULL when the pool
 * is exhausted or connecting fails.  With pooling disabled
 * (max_connections == 0), always returns a fresh connection.
 */
static RedisConnection *
redis_connection_pool_get_connection (RedisConnectionPool *pool, const char *passwd)
{
    RedisConnection *conn = NULL;
    guint i;

    if (pool->max_connections == 0)
        return redis_connection_new (pool->host, passwd, pool->port);

    pthread_mutex_lock (&pool->lock);

    /* First pass: look for an idle pooled connection. */
    for (i = 0; i < pool->connections->len; ++i) {
        RedisConnection *candidate = g_ptr_array_index (pool->connections, i);
        if (candidate->is_available) {
            candidate->is_available = FALSE;
            conn = candidate;
            break;
        }
    }

    /* Nothing idle: grow the pool if capacity allows. */
    if (conn == NULL) {
        if (pool->connections->len < pool->max_connections) {
            conn = redis_connection_new (pool->host, passwd, pool->port);
            if (conn) {
                conn->is_available = FALSE;
                g_ptr_array_add (pool->connections, conn);
            }
        } else {
            seaf_warning ("The number of redis connections exceeds the limit. The maximum connections is %d.\n", pool->max_connections);
        }
    }

    pthread_mutex_unlock (&pool->lock);
    return conn;
}
/*
 * Return a borrowed connection.  Connections flagged with ->release
 * (e.g. after a command error) are evicted and destroyed; otherwise the
 * connection is marked idle for reuse.  With pooling disabled the
 * connection is simply destroyed.
 */
static void
redis_connection_pool_return_connection (RedisConnectionPool *pool, RedisConnection *conn)
{
    if (conn == NULL)
        return;

    if (pool->max_connections == 0) {
        redis_connection_free (conn);
        return;
    }

    if (conn->release) {
        pthread_mutex_lock (&pool->lock);
        g_ptr_array_remove (pool->connections, conn);
        pthread_mutex_unlock (&pool->lock);
        redis_connection_free (conn);
        return;
    }

    pthread_mutex_lock (&pool->lock);
    conn->is_available = TRUE;
    pthread_mutex_unlock (&pool->lock);
}
/*
 * Fetch the value stored under @obj_id.  On success returns a newly
 * allocated copy (caller frees) and stores its length in *len; returns
 * NULL when the key is absent or on error.  A failed command marks the
 * connection for eviction from the pool.
 */
void *
redis_cache_get_object (ObjCache *cache, const char *obj_id, size_t *len)
{
    RedisPriv *priv = cache->priv;
    RedisConnectionPool *pool = priv->redis_pool;
    RedisConnection *conn;
    redisReply *r;
    char *value = NULL;

    conn = redis_connection_pool_get_connection (pool, priv->passwd);
    if (conn == NULL) {
        seaf_warning ("Failed to get redis connection to host %s.\n", cache->host);
        return NULL;
    }

    r = redisCommand (conn->ac, "GET %s", obj_id);
    if (!r) {
        seaf_warning ("Failed to get object %s from redis cache.\n", obj_id);
        conn->release = TRUE;
        goto out;
    }

    if (r->type != REDIS_REPLY_STRING) {
        /* Non-string reply: either the key is absent or an error. */
        if (r->type == REDIS_REPLY_ERROR) {
            conn->release = TRUE;
            seaf_warning ("Failed to get %s from redis cache: %s.\n",
                          obj_id, r->str);
        }
        goto out;
    }

    *len = r->len;
    value = g_memdup (r->str, r->len);

out:
    freeReplyObject (r);
    redis_connection_pool_return_connection (pool, conn);
    return value;
}
/*
 * Store @object under @obj_id with an expiry (seconds).  A non-positive
 * @expiry falls back to the cache-wide default.  Returns 0 on success,
 * -1 on failure; command errors mark the connection for eviction.
 */
int
redis_cache_set_object (ObjCache *cache,
                        const char *obj_id,
                        const void *object,
                        int len,
                        int expiry)
{
    RedisPriv *priv = cache->priv;
    RedisConnectionPool *pool = priv->redis_pool;
    RedisConnection *conn;
    redisReply *r;
    int status = 0;

    conn = redis_connection_pool_get_connection (pool, priv->passwd);
    if (conn == NULL) {
        seaf_warning ("Failed to get redis connection to host %s.\n", cache->host);
        return -1;
    }

    if (expiry <= 0)
        expiry = cache->mc_expiry;

    r = redisCommand (conn->ac, "SET %s %b EX %d", obj_id, object, len, expiry);
    if (!r) {
        seaf_warning ("Failed to set object %s to redis cache.\n", obj_id);
        status = -1;
        conn->release = TRUE;
        goto out;
    }

    if (r->type != REDIS_REPLY_STATUS || g_strcmp0 (r->str, "OK") != 0) {
        if (r->type == REDIS_REPLY_ERROR) {
            conn->release = TRUE;
            seaf_warning ("Failed to set %s to redis: %s.\n",
                          obj_id, r->str);
        }
        status = -1;
    }

out:
    freeReplyObject (r);
    redis_connection_pool_return_connection (pool, conn);
    return status;
}
/*
 * Check whether @obj_id exists in the cache.  Returns TRUE only when
 * redis reports EXISTS == 1; any error or absence yields FALSE.
 */
gboolean
redis_cache_test_object (ObjCache *cache, const char *obj_id)
{
    RedisPriv *priv = cache->priv;
    RedisConnectionPool *pool = priv->redis_pool;
    RedisConnection *conn;
    redisReply *r;
    gboolean exists = FALSE;

    conn = redis_connection_pool_get_connection (pool, priv->passwd);
    if (conn == NULL) {
        seaf_warning ("Failed to get redis connection to host %s.\n", cache->host);
        return exists;
    }

    r = redisCommand (conn->ac, "EXISTS %s", obj_id);
    if (!r) {
        seaf_warning ("Failed to test object %s from redis cache.\n", obj_id);
        conn->release = TRUE;
        goto out;
    }

    if (r->type != REDIS_REPLY_INTEGER || r->integer != 1) {
        if (r->type == REDIS_REPLY_ERROR) {
            conn->release = TRUE;
            seaf_warning ("Failed to test %s from redis: %s.\n",
                          obj_id, r->str);
        }
        goto out;
    }

    exists = TRUE;

out:
    freeReplyObject (r);
    redis_connection_pool_return_connection (pool, conn);
    return exists;
}
/*
 * Delete @obj_id from the cache.  Returns 0 when exactly one key was
 * removed, -1 otherwise (including "key did not exist").
 */
int
redis_cache_delete_object (ObjCache *cache, const char *obj_id)
{
    RedisPriv *priv = cache->priv;
    RedisConnectionPool *pool = priv->redis_pool;
    RedisConnection *conn;
    redisReply *r;
    int status = 0;

    conn = redis_connection_pool_get_connection (pool, priv->passwd);
    if (conn == NULL) {
        seaf_warning ("Failed to get redis connection to host %s.\n", cache->host);
        return -1;
    }

    r = redisCommand (conn->ac, "DEL %s", obj_id);
    if (!r) {
        seaf_warning ("Failed to delete object %s from redis cache.\n", obj_id);
        status = -1;
        conn->release = TRUE;
        goto out;
    }

    if (r->type != REDIS_REPLY_INTEGER || r->integer != 1) {
        if (r->type == REDIS_REPLY_ERROR) {
            conn->release = TRUE;
            seaf_warning ("Failed to del %s from redis: %s.\n",
                          obj_id, r->str);
        }
        status = -1;
    }

out:
    freeReplyObject (r);
    redis_connection_pool_return_connection (pool, conn);
    return status;
}
/*
 * PUBLISH @msg on @channel.  Returns 0 on success, -1 on failure.
 * (PUBLISH returns the subscriber count, which is never negative.)
 */
int
redis_cache_publish (ObjCache *cache, const char *channel, const char *msg)
{
    RedisPriv *priv = cache->priv;
    RedisConnectionPool *pool = priv->redis_pool;
    RedisConnection *conn;
    redisReply *r;
    int status = 0;

    conn = redis_connection_pool_get_connection (pool, priv->passwd);
    if (conn == NULL) {
        seaf_warning ("Failed to get redis connection to host %s.\n", cache->host);
        return -1;
    }

    r = redisCommand (conn->ac, "PUBLISH %s %s", channel, msg);
    if (!r) {
        seaf_warning ("Failed to publish message to redis channel %s.\n", channel);
        status = -1;
        conn->release = TRUE;
        goto out;
    }

    if (r->type != REDIS_REPLY_INTEGER || r->integer < 0) {
        if (r->type == REDIS_REPLY_ERROR) {
            conn->release = TRUE;
            seaf_warning ("Failed to publish message to redis channel %s.\n", channel);
        }
        status = -1;
    }

out:
    freeReplyObject (r);
    redis_connection_pool_return_connection (pool, conn);
    return status;
}
/*
 * LPUSH @msg onto @list.  Returns 0 on success, -1 on failure.
 */
int
redis_cache_push (ObjCache *cache, const char *list, const char *msg)
{
    RedisPriv *priv = cache->priv;
    RedisConnectionPool *pool = priv->redis_pool;
    RedisConnection *conn;
    redisReply *r;
    int status = 0;

    conn = redis_connection_pool_get_connection (pool, priv->passwd);
    if (conn == NULL) {
        seaf_warning ("Failed to get redis connection to host %s.\n", cache->host);
        return -1;
    }

    r = redisCommand (conn->ac, "LPUSH %s %s", list, msg);
    if (!r) {
        seaf_warning ("Failed to push message to redis list %s.\n", list);
        status = -1;
        conn->release = TRUE;
        goto out;
    }

    if (r->type != REDIS_REPLY_INTEGER || r->integer < 0) {
        if (r->type == REDIS_REPLY_ERROR) {
            conn->release = TRUE;
            seaf_warning ("Failed to push message to redis list %s.\n", list);
        }
        status = -1;
    }

out:
    freeReplyObject (r);
    redis_connection_pool_return_connection (pool, conn);
    return status;
}
/*
 * Create an ObjCache backed by redis at @host:@port.
 * @passwd may be NULL when the server requires no AUTH.
 * @redis_expiry is the default TTL (seconds) for set_object.
 * @max_connections caps the connection pool (0 disables pooling).
 */
ObjCache *
redis_cache_new (const char *host, const char *passwd,
                 int port, int redis_expiry,
                 int max_connections)
{
    ObjCache *cache = g_new0 (ObjCache, 1);
    RedisPriv *priv = g_new0 (RedisPriv, 1);

    priv->redis_pool = redis_connection_pool_new (host, port, max_connections);
    priv->passwd = g_strdup (passwd);

    cache->priv = priv;
    cache->host = g_strdup (host);
    cache->port = port;
    cache->mc_expiry = redis_expiry;
    cache->cache_type = TYPE_REDIS;

    /* Wire up the ObjCache vtable. */
    cache->get_object = redis_cache_get_object;
    cache->set_object = redis_cache_set_object;
    cache->test_object = redis_cache_test_object;
    cache->delete_object = redis_cache_delete_object;
    cache->publish = redis_cache_publish;
    cache->push = redis_cache_push;

    return cache;
}
================================================
FILE: common/redis-cache.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef REDIS_CACHE_H
#define REDIS_CACHE_H

#include "obj-cache.h"

/* Create an ObjCache backed by a redis server at @host:@port.
 * @passwd may be NULL; @mc_expiry is the default TTL in seconds;
 * @max_connections of 0 disables connection pooling. */
ObjCache *
redis_cache_new (const char *host, const char *passwd,
                 int port, int mc_expiry,
                 int max_connections);

#endif
================================================
FILE: common/rpc-service.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#include "common.h"
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include "utils.h"
#include "seafile-session.h"
#include "seaf-utils.h"
#include "fs-mgr.h"
#include "repo-mgr.h"
#include "seafile-error.h"
#include "seafile-rpc.h"
#include "mq-mgr.h"
#include "password-hash.h"
#ifdef SEAFILE_SERVER
#include "web-accesstoken-mgr.h"
#endif
#ifndef SEAFILE_SERVER
#include "seafile-config.h"
#endif
#define DEBUG_FLAG SEAFILE_DEBUG_OTHER
#include "log.h"
#ifndef SEAFILE_SERVER
#include "../daemon/vc-utils.h"
#endif /* SEAFILE_SERVER */
/* -------- Utilities -------- */
/*
 * Convert an internal SeafRepo into a SeafileRepo GObject for RPC.
 * Returns NULL on allocation failure, or (client build only) when the
 * repo has no head or an invalid worktree.  Caller owns the returned
 * reference.
 */
static GObject*
convert_repo (SeafRepo *r)
{
    SeafileRepo *repo = NULL;
#ifndef SEAFILE_SERVER
    /* Client: skip repos that are not checked out or whose worktree is
     * invalid (unless config allows invalid worktrees). */
    if (r->head == NULL)
        return NULL;
    if (r->worktree_invalid && !seafile_session_config_get_allow_invalid_worktree(seaf))
        return NULL;
#endif
    repo = seafile_repo_new ();
    if (!repo)
        return NULL;
    /* Core properties shared by client and server. */
    g_object_set (repo, "id", r->id, "name", r->name,
                  "desc", r->desc, "encrypted", r->encrypted,
                  "magic", r->magic, "enc_version", r->enc_version,
                  "pwd_hash", r->pwd_hash,
                  "pwd_hash_algo", r->pwd_hash_algo, "pwd_hash_params", r->pwd_hash_params,
                  "head_cmmt_id", r->head ? r->head->commit_id : NULL,
                  "root", r->root_id,
                  "version", r->version, "last_modify", r->last_modify,
                  "last_modifier", r->last_modifier,
                  NULL);
    /* Aliased property names kept for API compatibility. */
    g_object_set (repo,
                  "repo_id", r->id, "repo_name", r->name,
                  "repo_desc", r->desc, "last_modified", r->last_modify,
                  "status", r->status,
                  "repo_type", r->type,
                  NULL);
#ifdef SEAFILE_SERVER
    /* Server: expose virtual-repo origin, encryption extras and stats. */
    if (r->virtual_info) {
        g_object_set (repo,
                      "is_virtual", TRUE,
                      "origin_repo_id", r->virtual_info->origin_repo_id,
                      "origin_path", r->virtual_info->path,
                      NULL);
    }
    if (r->encrypted) {
        /* random_key exists from enc_version 2, salt from version 3. */
        if (r->enc_version >= 2)
            g_object_set (repo, "random_key", r->random_key, NULL);
        if (r->enc_version >= 3)
            g_object_set (repo, "salt", r->salt, NULL);
    }
    g_object_set (repo, "store_id", r->store_id,
                  "repaired", r->repaired,
                  "size", r->size, "file_count", r->file_count, NULL);
    g_object_set (repo, "is_corrupted", r->is_corrupted, NULL);
#endif
#ifndef SEAFILE_SERVER
    /* Client-only sync state. */
    g_object_set (repo, "worktree", r->worktree,
                  "relay-id", r->relay_id,
                  "worktree-invalid", r->worktree_invalid,
                  "last-sync-time", r->last_sync_time,
                  "auto-sync", r->auto_sync,
                  NULL);
#endif /* SEAFILE_SERVER */
    return (GObject *)repo;
}
/* GDestroyNotify-compatible unref for SeafileRepo objects. */
static void
free_repo_obj (gpointer repo)
{
    if (repo)
        g_object_unref ((GObject *)repo);
}
/*
 * Convert a list of SeafRepo* into SeafileRepo GObjects, preserving
 * order.  If any single conversion fails, the partial result is freed
 * and NULL is returned.
 */
static GList *
convert_repo_list (GList *inner_repos)
{
    GList *converted = NULL;
    GList *it;

    for (it = inner_repos; it != NULL; it = it->next) {
        GObject *obj = convert_repo ((SeafRepo *)it->data);
        if (obj == NULL) {
            g_list_free_full (converted, free_repo_obj);
            return NULL;
        }
        converted = g_list_prepend (converted, obj);
    }

    return g_list_reverse (converted);
}
/*
* RPC functions available for both clients and server.
*/
/*
 * RPC: list all branches of @repo_id as SeafileBranch objects.
 * Caller owns the returned list and its elements.
 */
GList *
seafile_branch_gets (const char *repo_id, GError **error)
{
    GList *branches, *it, *result = NULL;

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    branches = seaf_branch_manager_get_branch_list (seaf->branch_mgr, repo_id);

    for (it = branches; it != NULL; it = it->next) {
        SeafBranch *b = it->data;
        SeafileBranch *branch = seafile_branch_new ();
        g_object_set (branch, "repo_id", b->repo_id, "name", b->name,
                      "commit_id", b->commit_id, NULL);
        result = g_list_prepend (result, branch);
        seaf_branch_unref (b);
    }

    g_list_free (branches);
    return g_list_reverse (result);
}
#ifdef SEAFILE_SERVER
/* RPC: page through deleted (trashed) repos; thin manager wrapper. */
GList*
seafile_get_trash_repo_list (int start, int limit, GError **error)
{
    return seaf_repo_manager_get_trash_repo_list (seaf->repo_mgr, start, limit, error);
}
/* RPC: list trashed repos belonging to @owner. */
GList *
seafile_get_trash_repos_by_owner (const char *owner, GError **error)
{
    if (owner == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return NULL;
    }

    return seaf_repo_manager_get_trash_repos_by_owner (seaf->repo_mgr, owner, error);
}
/* RPC: permanently delete one repo from the trash. */
int
seafile_del_repo_from_trash (const char *repo_id, GError **error)
{
    if (repo_id == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return -1;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    return seaf_repo_manager_del_repo_from_trash (seaf->repo_mgr, repo_id, error);
}
/* RPC: permanently delete every repo in the trash. */
int
seafile_empty_repo_trash (GError **error)
{
    return seaf_repo_manager_empty_repo_trash (seaf->repo_mgr, error);
}
/* RPC: permanently delete all trashed repos belonging to @owner. */
int
seafile_empty_repo_trash_by_owner (const char *owner, GError **error)
{
    if (owner == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return -1;
    }

    return seaf_repo_manager_empty_repo_trash_by_owner (seaf->repo_mgr, owner, error);
}
/* RPC: move a trashed repo back into service. */
int
seafile_restore_repo_from_trash (const char *repo_id, GError **error)
{
    if (repo_id == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return -1;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    return seaf_repo_manager_restore_repo_from_trash (seaf->repo_mgr, repo_id, error);
}
int
seafile_publish_event(const char *channel, const char *content, GError **error)
{
int ret = 0;
if (!channel || !content) {
g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
return -1;
}
ret = seaf_mq_manager_publish_event (seaf->mq_mgr, channel, content);
return ret;
}
json_t *
seafile_pop_event(const char *channel, GError **error)
{
if (!channel) {
g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
return NULL;
}
return seaf_mq_manager_pop_event (seaf->mq_mgr, channel);
}
#endif
/*
 * RPC: list repos as SeafileRepo objects.
 * @start/@limit page the result; @ret_virt_repo includes virtual repos.
 */
GList*
seafile_get_repo_list (int start, int limit, const char *order_by, int ret_virt_repo, GError **error)
{
    GList *inner = seaf_repo_manager_get_repo_list (seaf->repo_mgr, start, limit,
                                                    order_by, ret_virt_repo);
    GList *converted = convert_repo_list (inner);

#ifdef SEAFILE_SERVER
    /* Server-side repos are ref-counted; drop the refs taken by the manager. */
    GList *it;
    for (it = inner; it != NULL; it = it->next)
        seaf_repo_unref ((SeafRepo *)it->data);
#endif
    g_list_free (inner);

    return converted;
}
#ifdef SEAFILE_SERVER
/* RPC: total number of repos on the server. */
gint64
seafile_count_repos (GError **error)
{
    return seaf_repo_manager_count_repos (seaf->repo_mgr, error);
}
#endif
/*
 * RPC: fetch one repo by id as a SeafileRepo object, or NULL when the
 * repo does not exist (or, on the client, is not checked out).
 */
GObject*
seafile_get_repo (const char *repo_id, GError **error)
{
    if (repo_id == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return NULL;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    SeafRepo *r = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    /* Don't return repo that's not checked out. */
    if (r == NULL)
        return NULL;

    GObject *repo = convert_repo (r);
#ifdef SEAFILE_SERVER
    seaf_repo_unref (r);
#endif
    return repo;
}
/*
 * Convert an internal SeafCommit into a SeafileCommit GObject for RPC.
 * Caller owns the returned reference.
 */
SeafileCommit *
convert_to_seafile_commit (SeafCommit *c)
{
    SeafileCommit *commit = seafile_commit_new ();
    g_object_set (commit,
                  "id", c->commit_id,
                  "creator_name", c->creator_name,
                  "creator", c->creator_id,
                  "desc", c->desc,
                  "ctime", c->ctime,
                  "repo_id", c->repo_id,
                  "root_id", c->root_id,
                  "parent_id", c->parent_id,
                  "second_parent_id", c->second_parent_id,
                  "version", c->version,
                  "new_merge", c->new_merge,
                  "conflict", c->conflict,
                  "device_name", c->device_name,
                  "client_version", c->client_version,
                  NULL);
    return commit;
}
/*
 * RPC: fetch one commit by (repo_id, version, commit id).
 * Returns NULL when the arguments are invalid or the commit is missing.
 */
GObject*
seafile_get_commit (const char *repo_id, int version,
                    const gchar *id, GError **error)
{
    if (repo_id == NULL || !is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }
    if (id == NULL || !is_object_id_valid (id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid commit id");
        return NULL;
    }

    SeafCommit *c = seaf_commit_manager_get_commit (seaf->commit_mgr, repo_id, version, id);
    if (c == NULL)
        return NULL;

    SeafileCommit *commit = convert_to_seafile_commit (c);
    seaf_commit_unref (c);
    return (GObject *)commit;
}
/* State shared with get_commit() while walking the commit tree. */
struct CollectParam {
    int offset;          /* number of commits to skip */
    int limit;           /* max commits to collect; <= 0 means unlimited */
    int count;           /* commits visited so far */
    GList *commits;      /* collected SeafileCommit*, newest first */
#ifdef SEAFILE_SERVER
    gint64 truncate_time;    /* history cutoff; 0 = only the head commit */
    gboolean traversed_head; /* head commit has been visited */
#endif
};
/*
 * Commit-tree traversal callback: collect commits into cp->commits,
 * honoring offset/limit paging and (server only) the repo's history
 * truncation time.  Returning TRUE means "no error"; *stop ends the
 * traversal after the current commit.
 */
static gboolean
get_commit (SeafCommit *c, void *data, gboolean *stop)
{
    struct CollectParam *cp = data;

#ifdef SEAFILE_SERVER
    if (cp->truncate_time == 0)
    {
        *stop = TRUE;
        /* Stop after traversing the head commit. */
    }
    /* We use <= here. This is for handling clean trash and history.
     * If the user cleans all history, truncate time will be equal to
     * the commit's ctime. In such case, we don't actually want to display
     * this commit.
     */
    else if (cp->truncate_time > 0 &&
             (gint64)(c->ctime) <= cp->truncate_time &&
             cp->traversed_head)
    {
        /* Still traverse the first commit older than truncate_time.
         * If a file in the child commit of this commit is deleted,
         * we need to access this commit in order to restore it
         * from trash.
         */
        *stop = TRUE;
    }

    /* Always traverse the head commit. */
    if (!cp->traversed_head)
        cp->traversed_head = TRUE;
#endif

    /* if offset = 1, limit = 1, we should stop when the count = 2 */
    if (cp->limit > 0 && cp->count >= cp->offset + cp->limit) {
        *stop = TRUE;
        return TRUE;  /* TRUE to indicate no error */
    }

    /* Skip the first `offset` commits, collect the rest. */
    if (cp->count >= cp->offset) {
        SeafileCommit *commit = convert_to_seafile_commit (c);
        cp->commits = g_list_prepend (cp->commits, commit);
    }

    ++cp->count;

    return TRUE;  /* TRUE to indicate no error */
}
/*
 * RPC: list commits of @repo_id, paged by @offset/@limit
 * (limit <= 0 means "all").  Walks the commit tree from the repo head
 * (falling back to the master branch when no head is loaded).
 *
 * Fix: the NULL check of @repo_id previously came AFTER
 * is_uuid_valid(repo_id); validate the argument for NULL first so the
 * validator is never handed a NULL pointer and the error message is
 * accurate.
 */
GList*
seafile_get_commit_list (const char *repo_id,
                         int offset,
                         int limit,
                         GError **error)
{
    SeafRepo *repo;
    GList *commits = NULL;
    gboolean ret;
    struct CollectParam cp;
    char *commit_id;

    if (!repo_id) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return NULL;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    /* correct parameter */
    if (offset < 0)
        offset = 0;

    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_REPO, "No such repository");
        return NULL;
    }

    /* Pick the commit to start from: the loaded head, or the master
     * branch head when the repo object has no head in memory. */
    if (!repo->head) {
        SeafBranch *branch =
            seaf_branch_manager_get_branch (seaf->branch_mgr,
                                            repo->id, "master");
        if (branch != NULL) {
            commit_id = g_strdup (branch->commit_id);
            seaf_branch_unref (branch);
        } else {
            seaf_warning ("[repo-mgr] Failed to get repo %s branch master\n",
                          repo_id);
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_REPO,
                         "No head and branch master");
#ifdef SEAFILE_SERVER
            seaf_repo_unref (repo);
#endif
            return NULL;
        }
    } else {
        commit_id = g_strdup (repo->head->commit_id);
    }

    /* Init CollectParam */
    memset (&cp, 0, sizeof(cp));
    cp.offset = offset;
    cp.limit = limit;
#ifdef SEAFILE_SERVER
    cp.truncate_time = seaf_repo_manager_get_repo_truncate_time (seaf->repo_mgr,
                                                                 repo_id);
#endif

    ret =
        seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr,
                                                  repo->id, repo->version,
                                                  commit_id, get_commit, &cp, TRUE);
    g_free (commit_id);
#ifdef SEAFILE_SERVER
    seaf_repo_unref (repo);
#endif

    if (!ret) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_LIST_COMMITS, "Failed to list commits");
        return NULL;
    }

    commits = g_list_reverse (cp.commits);
    return commits;
}
#ifndef SEAFILE_SERVER
/*
 * Stop syncing @repo on the client.  If a sync run is in flight the repo
 * is only marked deleted (sync-mgr deletes it later); otherwise it is
 * removed immediately.  Returns 0 on success, -1 if the daemon has not
 * finished starting.
 */
static
int do_unsync_repo(SeafRepo *repo)
{
    if (!seaf->started) {
        seaf_message ("System not started, skip removing repo.\n");
        return -1;
    }

    /* Repos with interval-based sync are not watched by the wt monitor. */
    if (repo->auto_sync && (repo->sync_interval == 0))
        seaf_wt_monitor_unwatch_repo (seaf->wt_monitor, repo->id);

    seaf_sync_manager_cancel_sync_task (seaf->sync_mgr, repo->id);

    SyncInfo *info = seaf_sync_manager_get_sync_info (seaf->sync_mgr, repo->id);

    /* If we are syncing the repo,
     * we just mark the repo as deleted and let sync-mgr actually delete it.
     * Otherwise we are safe to delete the repo.
     */
    /* NOTE(review): this copy of the worktree path is never used before
     * being freed — presumably a leftover from code that touched the
     * worktree after deleting the repo; confirm before removing. */
    char *worktree = g_strdup (repo->worktree);
    if (info != NULL && info->in_sync) {
        seaf_repo_manager_mark_repo_deleted (seaf->repo_mgr, repo);
    } else {
        seaf_repo_manager_del_repo (seaf->repo_mgr, repo);
    }

    g_free (worktree);
    return 0;
}
/*
 * Cancel every in-progress clone task that belongs to the account
 * identified by @account_server/@account_email.
 */
static void
cancel_clone_tasks_by_account (const char *account_server, const char *account_email)
{
    GList *tasks = seaf_clone_manager_get_tasks (seaf->clone_mgr);
    GList *it;

    for (it = tasks; it != NULL; it = it->next) {
        CloneTask *task = it->data;
        if (g_strcmp0 (account_server, task->peer_addr) == 0 &&
            g_strcmp0 (account_email, task->email) == 0) {
            seaf_clone_manager_cancel_task (seaf->clone_mgr, task->repo_id);
        }
    }

    g_list_free (tasks);
}
/*
 * RPC: stop syncing every repo bound to the account
 * @server_addr/@email, and cancel its pending clone tasks.
 * Returns 0 on success, -1 on error.
 *
 * Fix: the early `return -1` inside the loop leaked both the
 * relay-address string and the repo list.
 */
int
seafile_unsync_repos_by_account (const char *server_addr, const char *email, GError **error)
{
    if (!server_addr || !email) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return -1;
    }

    GList *ptr, *repos = seaf_repo_manager_get_repo_list (seaf->repo_mgr, -1, -1, NULL, 0);
    if (!repos) {
        return 0;
    }

    for (ptr = repos; ptr; ptr = ptr->next) {
        SeafRepo *repo = (SeafRepo*)ptr->data;
        char *addr = NULL;
        seaf_repo_manager_get_repo_relay_info (seaf->repo_mgr,
                                               repo->id,
                                               &addr, /* addr */
                                               NULL); /* port */
        if (g_strcmp0 (addr, server_addr) == 0 && g_strcmp0 (repo->email, email) == 0) {
            if (do_unsync_repo (repo) < 0) {
                g_free (addr);
                g_list_free (repos);
                return -1;
            }
        }
        g_free (addr);
    }

    g_list_free (repos);
    cancel_clone_tasks_by_account (server_addr, email);
    return 0;
}
/*
 * RPC: remove the stored sync tokens of every repo bound to the account
 * @server_addr/@email, and cancel its pending clone tasks.
 * Returns 0 on success, -1 on error.
 *
 * Fix: the early `return -1` inside the loop leaked both the
 * relay-address string and the repo list.
 */
int
seafile_remove_repo_tokens_by_account (const char *server_addr, const char *email, GError **error)
{
    if (!server_addr || !email) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return -1;
    }

    GList *ptr, *repos = seaf_repo_manager_get_repo_list (seaf->repo_mgr, -1, -1, NULL, 0);
    if (!repos) {
        return 0;
    }

    for (ptr = repos; ptr; ptr = ptr->next) {
        SeafRepo *repo = (SeafRepo*)ptr->data;
        char *addr = NULL;
        seaf_repo_manager_get_repo_relay_info (seaf->repo_mgr,
                                               repo->id,
                                               &addr, /* addr */
                                               NULL); /* port */
        if (g_strcmp0 (addr, server_addr) == 0 && g_strcmp0 (repo->email, email) == 0) {
            if (seaf_repo_manager_remove_repo_token (seaf->repo_mgr, repo) < 0) {
                g_free (addr);
                g_list_free (repos);
                return -1;
            }
        }
        g_free (addr);
    }

    g_list_free (repos);
    cancel_clone_tasks_by_account (server_addr, email);
    return 0;
}
/*
 * RPC: store the sync token for @repo_id.
 * Returns 0 on success, -1 on bad arguments, unknown repo, or a
 * storage failure.
 */
int
seafile_set_repo_token (const char *repo_id,
                        const char *token,
                        GError **error)
{
    if (repo_id == NULL || token == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty");
        return -1;
    }

    SeafRepo *repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (repo == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_REPO, "Can't find Repo %s", repo_id);
        return -1;
    }

    if (seaf_repo_manager_set_repo_token (seaf->repo_mgr, repo, token) < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL,
                     "Failed to set token for repo %s", repo_id);
        return -1;
    }

    return 0;
}
#endif
/*
 * RPC: delete a repo.  On the server the repo is removed from the
 * database; on the client it is unsynced (see do_unsync_repo).
 */
int
seafile_destroy_repo (const char *repo_id, GError **error)
{
    if (repo_id == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return -1;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

#ifndef SEAFILE_SERVER
    SeafRepo *repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (repo == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "No such repository");
        return -1;
    }
    return do_unsync_repo (repo);
#else
    return seaf_repo_manager_del_repo (seaf->repo_mgr, repo_id, error);
#endif
}
/*
 * RPC: generate the encryption metadata (magic, random key, and from
 * enc_version 3 a repo salt) for creating an encrypted repo.
 * Returns a SeafileEncryptionInfo object, or NULL on failure.
 */
GObject *
seafile_generate_magic_and_random_key(int enc_version,
                                      const char* repo_id,
                                      const char *passwd,
                                      GError **error)
{
    if (!repo_id || !passwd) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return NULL;
    }

    gchar salt[65] = {0};
    gchar magic[65] = {0};
    gchar pwd_hash[65] = {0};
    gchar random_key[97] = {0};

    /* Repo salt only exists from encryption version 3 on. */
    if (enc_version >= 3 && seafile_generate_repo_salt (salt) < 0) {
        return NULL;
    }

    /* NOTE(review): seafile_generate_magic's status is ignored here while
     * seafile_generate_random_key's is checked — confirm whether it can
     * fail and should be checked too. */
    seafile_generate_magic (enc_version, repo_id, passwd, salt, magic);
    if (seafile_generate_random_key (passwd, enc_version, salt, random_key) < 0) {
        return NULL;
    }

    SeafileEncryptionInfo *sinfo;
    sinfo = g_object_new (SEAFILE_TYPE_ENCRYPTION_INFO,
                          "repo_id", repo_id,
                          "passwd", passwd,
                          "enc_version", enc_version,
                          "magic", magic,
                          "random_key", random_key,
                          NULL);
    if (enc_version >= 3)
        g_object_set (sinfo, "salt", salt, NULL);

    return (GObject *)sinfo;
}
#include "diff-simple.h"
/* Map a diff status code to its RPC string; NULL for unknown codes. */
inline static const char*
get_diff_status_str(char status)
{
    switch (status) {
    case DIFF_STATUS_ADDED:       return "add";
    case DIFF_STATUS_DELETED:     return "del";
    case DIFF_STATUS_MODIFIED:    return "mod";
    case DIFF_STATUS_RENAMED:     return "mov";
    case DIFF_STATUS_DIR_ADDED:   return "newdir";
    case DIFF_STATUS_DIR_DELETED: return "deldir";
    default:                      return NULL;
    }
}
/*
 * RPC: diff two commits of @repo_id.
 * @arg1 may be "" (empty string) to diff against an empty tree; both
 * non-empty arguments must be valid object ids.  @fold_dir_results
 * collapses whole-directory changes into single entries.
 * Returns a list of SeafileDiffEntry, or NULL with @error set.
 */
GList *
seafile_diff (const char *repo_id, const char *arg1, const char *arg2, int fold_dir_results, GError **error)
{
    SeafRepo *repo;
    char *err_msgs = NULL;
    GList *diff_entries, *p;
    GList *ret = NULL;

    if (!repo_id || !arg1 || !arg2) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return NULL;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }
    /* arg1 == "" is allowed (diff from empty); otherwise both ids must
     * be well-formed. */
    if ((arg1[0] != 0 && !is_object_id_valid (arg1)) || !is_object_id_valid(arg2)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid commit id");
        return NULL;
    }

    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "No such repository");
        return NULL;
    }

    diff_entries = seaf_repo_diff (repo, arg1, arg2, fold_dir_results, &err_msgs);
    if (err_msgs) {
        g_set_error (error, SEAFILE_DOMAIN, -1, "%s", err_msgs);
        g_free (err_msgs);
#ifdef SEAFILE_SERVER
        seaf_repo_unref (repo);
#endif
        return NULL;
    }

#ifdef SEAFILE_SERVER
    seaf_repo_unref (repo);
#endif

    /* Convert internal DiffEntry records into RPC objects. */
    for (p = diff_entries; p != NULL; p = p->next) {
        DiffEntry *de = p->data;
        SeafileDiffEntry *entry = g_object_new (
            SEAFILE_TYPE_DIFF_ENTRY,
            "status", get_diff_status_str(de->status),
            "name", de->name,
            "new_name", de->new_name,
            NULL);
        ret = g_list_prepend (ret, entry);
    }

    for (p = diff_entries; p != NULL; p = p->next) {
        DiffEntry *de = p->data;
        diff_entry_free (de);
    }
    g_list_free (diff_entries);

    return g_list_reverse (ret);
}
/*
* RPC functions only available for server.
*/
#ifdef SEAFILE_SERVER
/*
 * RPC: list the entries of directory @path as it existed in @commit_id
 * of @repo_id.  Returns a list of SeafileDirent objects (entries with
 * corrupt/invalid object ids are silently skipped), or NULL with
 * @error set.
 */
GList *
seafile_list_dir_by_path(const char *repo_id,
                         const char *commit_id,
                         const char *path, GError **error)
{
    SeafRepo *repo = NULL;
    SeafCommit *commit = NULL;
    SeafDir *dir;
    SeafDirent *dent;
    SeafileDirent *d;
    GList *ptr;
    GList *res = NULL;

    if (!repo_id || !commit_id || !path) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Args can't be NULL");
        return NULL;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid repo id");
        return NULL;
    }
    if (!is_object_id_valid (commit_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid commit id");
        return NULL;
    }

    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad repo id");
        return NULL;
    }

    commit = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                             repo_id, repo->version,
                                             commit_id);
    if (!commit) {
        /* NOTE(review): on this path seaf_commit_unref below is called
         * with commit == NULL — assumed NULL-safe; confirm. */
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_COMMIT, "No such commit");
        goto out;
    }

    /* Normalize the caller-supplied path before the fs lookup. */
    char *rpath = format_dir_path (path);
    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,
                                               repo->store_id,
                                               repo->version,
                                               commit->root_id,
                                               rpath, error);
    g_free (rpath);
    if (!dir) {
        seaf_warning ("Can't find seaf dir for %s in repo %s\n", path, repo->store_id);
        goto out;
    }

    for (ptr = dir->entries; ptr != NULL; ptr = ptr->next) {
        dent = ptr->data;

        /* Skip entries whose object id is malformed. */
        if (!is_object_id_valid (dent->id))
            continue;

        d = g_object_new (SEAFILE_TYPE_DIRENT,
                          "obj_id", dent->id,
                          "obj_name", dent->name,
                          "mode", dent->mode,
                          "version", dent->version,
                          "mtime", dent->mtime,
                          "size", dent->size,
                          NULL);
        res = g_list_prepend (res, d);
    }

    seaf_dir_free (dir);
    res = g_list_reverse (res);

out:
    seaf_repo_unref (repo);
    seaf_commit_unref (commit);
    return res;
}
static void
filter_error (GError **error)
{
    /* Downgrade "path does not exist" to a non-error: callers treat a NULL
     * result with no error set as an absent path rather than a failure. */
    if (*error == NULL)
        return;

    if (g_error_matches (*error, SEAFILE_DOMAIN, SEAF_ERR_PATH_NO_EXIST))
        g_clear_error (error);
}
char *
seafile_get_dir_id_by_commit_and_path(const char *repo_id,
                                      const char *commit_id,
                                      const char *path,
                                      GError **error)
{
    SeafRepo *repo = NULL;
    SeafCommit *commit = NULL;
    SeafDir *dir;
    char *dir_id = NULL;

    /* Validate arguments before touching any managers. */
    if (!repo_id || !commit_id || !path) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Args can't be NULL");
        return NULL;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid repo id");
        return NULL;
    }
    if (!is_object_id_valid (commit_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid commit id");
        return NULL;
    }

    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad repo id");
        return NULL;
    }

    commit = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                             repo_id, repo->version,
                                             commit_id);
    if (!commit) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_COMMIT, "No such commit");
        goto out;
    }

    /* Resolve the directory under this commit's root; a missing path is
     * reported as NULL with no error (see filter_error). */
    char *rpath = format_dir_path (path);
    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,
                                               repo->store_id,
                                               repo->version,
                                               commit->root_id,
                                               rpath, error);
    g_free (rpath);

    if (!dir) {
        seaf_warning ("Can't find seaf dir for %s in repo %s\n", path, repo->store_id);
        filter_error (error);
        goto out;
    }

    dir_id = g_strdup (dir->dir_id);
    seaf_dir_free (dir);

out:
    seaf_repo_unref (repo);
    seaf_commit_unref (commit);
    return dir_id;
}
/* RPC wrapper: update a library's name/description on behalf of `user`.
 * Delegates directly to the repo manager; returns 0 on success, -1 on error
 * (error details, if any, are reported through `error`). */
int
seafile_edit_repo (const char *repo_id,
                   const char *name,
                   const char *description,
                   const char *user,
                   GError **error)
{
    return seaf_repo_manager_edit_repo (repo_id, name, description, user, error);
}
/*
 * Change the password of an encrypted library.
 *
 * Verifies the old password, derives a new magic/pwd_hash and re-encrypts
 * the random key with the new password, then records the change in a new
 * commit on the head branch.  If the branch moved concurrently, the whole
 * operation is retried against the new head.
 *
 * Returns 0 on success, -1 on error.
 *
 * Fix: the "not encrypted", "unsupported enc version" and "incorrect
 * password" branches previously returned -1 directly after the repo had
 * been obtained, leaking the repo reference.  They now go through `out`
 * (seaf_commit_unref/seaf_repo_unref on NULL are no-ops here — this matches
 * the cleanup pattern of seafile_upgrade_repo_pwd_hash_algorithm below).
 */
int
seafile_change_repo_passwd (const char *repo_id,
                            const char *old_passwd,
                            const char *new_passwd,
                            const char *user,
                            GError **error)
{
    SeafRepo *repo = NULL;
    SeafCommit *commit = NULL, *parent = NULL;
    int ret = 0;

    if (!user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "No user given");
        return -1;
    }

    if (!old_passwd || old_passwd[0] == 0 || !new_passwd || new_passwd[0] == 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Empty passwd");
        return -1;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

retry:
    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "No such library");
        return -1;
    }

    if (!repo->encrypted) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Repo not encrypted");
        ret = -1;
        goto out;       /* was: return -1, leaking the repo reference */
    }

    if (repo->enc_version < 2) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Unsupported enc version");
        ret = -1;
        goto out;       /* was: return -1, leaking the repo reference */
    }

    /* Verify the old password against whichever scheme the repo uses. */
    if (repo->pwd_hash_algo) {
        if (seafile_pwd_hash_verify_repo_passwd (repo->enc_version, repo_id, old_passwd, repo->salt,
                                                 repo->pwd_hash, repo->pwd_hash_algo, repo->pwd_hash_params) < 0) {
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Incorrect password");
            ret = -1;
            goto out;   /* was: return -1, leaking the repo reference */
        }
    } else {
        if (seafile_verify_repo_passwd (repo_id, old_passwd, repo->magic,
                                        repo->enc_version, repo->salt) < 0) {
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Incorrect password");
            ret = -1;
            goto out;   /* was: return -1, leaking the repo reference */
        }
    }

    parent = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                             repo->id, repo->version,
                                             repo->head->commit_id);
    if (!parent) {
        seaf_warning ("Failed to get commit %s:%s.\n",
                      repo->id, repo->head->commit_id);
        ret = -1;
        goto out;
    }

    /* Derive the new verification value and re-wrap the random key. */
    char new_magic[65], new_pwd_hash[65], new_random_key[97];

    if (repo->pwd_hash_algo) {
        seafile_generate_pwd_hash (repo->enc_version, repo_id, new_passwd, repo->salt,
                                   repo->pwd_hash_algo, repo->pwd_hash_params, new_pwd_hash);
    } else {
        seafile_generate_magic (repo->enc_version, repo_id, new_passwd, repo->salt,
                                new_magic);
    }

    if (seafile_update_random_key (old_passwd, repo->random_key,
                                   new_passwd, new_random_key,
                                   repo->enc_version, repo->salt) < 0) {
        ret = -1;
        goto out;
    }

    if (repo->pwd_hash_algo) {
        memcpy (repo->pwd_hash, new_pwd_hash, 64);
    } else {
        memcpy (repo->magic, new_magic, 64);
    }
    memcpy (repo->random_key, new_random_key, 96);

    /* Record the change as a new commit on top of the current head. */
    commit = seaf_commit_new (NULL,
                              repo->id,
                              parent->root_id,
                              user,
                              EMPTY_SHA1,
                              "Changed library password",
                              0);
    commit->parent_id = g_strdup(parent->commit_id);
    seaf_repo_to_commit (repo, commit);

    if (seaf_commit_manager_add_commit (seaf->commit_mgr, commit) < 0) {
        ret = -1;
        goto out;
    }

    seaf_branch_set_commit (repo->head, commit->commit_id);
    if (seaf_branch_manager_test_and_update_branch (seaf->branch_mgr,
                                                    repo->head,
                                                    parent->commit_id,
                                                    FALSE, NULL, NULL, NULL) < 0) {
        /* Head moved concurrently: drop everything and retry from scratch. */
        seaf_repo_unref (repo);
        seaf_commit_unref (commit);
        seaf_commit_unref (parent);
        repo = NULL;
        commit = NULL;
        parent = NULL;
        goto retry;
    }

    /* Keep any cached decryption password in sync with the new one. */
    if (seaf_passwd_manager_is_passwd_set (seaf->passwd_mgr, repo_id, user))
        seaf_passwd_manager_set_passwd (seaf->passwd_mgr, repo_id,
                                        user, new_passwd, error);

out:
    seaf_commit_unref (commit);
    seaf_commit_unref (parent);
    seaf_repo_unref (repo);

    return ret;
}
/* Populate `commit` with the repo's metadata, substituting the given
 * password-hash triple for the repo's current one.  Used when recording a
 * password-hash-algorithm upgrade. */
static void
set_pwd_hash_to_commit (SeafCommit *commit,
                        SeafRepo *repo,
                        const char *pwd_hash,
                        const char *pwd_hash_algo,
                        const char *pwd_hash_params)
{
    commit->repo_name = g_strdup (repo->name);
    commit->repo_desc = g_strdup (repo->desc);
    commit->encrypted = repo->encrypted;
    commit->repaired = repo->repaired;

    if (commit->encrypted) {
        int v = repo->enc_version;

        commit->enc_version = v;
        /* enc versions 2-4 carry a random key; 3 and 4 additionally a salt. */
        if (v == 2 || v == 3 || v == 4)
            commit->random_key = g_strdup (repo->random_key);
        if (v == 3 || v == 4)
            commit->salt = g_strdup (repo->salt);

        commit->pwd_hash = g_strdup (pwd_hash);
        commit->pwd_hash_algo = g_strdup (pwd_hash_algo);
        commit->pwd_hash_params = g_strdup (pwd_hash_params);
    }

    commit->no_local_history = repo->no_local_history;
    commit->version = repo->version;
}
/*
 * Switch an encrypted library to a different password-hash algorithm
 * (PBKDF2 or Argon2id).  Verifies the password, recomputes the hash with
 * the new algorithm/params, deletes all sync tokens (so already-synced
 * clients cannot push back the old scheme), and records the change in a
 * new commit.  Retries if the head branch moved concurrently.
 *
 * Returns 0 on success (or if the repo already uses the requested
 * algorithm/params), -1 on error.
 *
 * Fix: the pwd-hash "Incorrect password" branch set `ret = 1`, unlike the
 * parallel magic-based branch (and every other error path here) which sets
 * -1; callers checking `< 0` would treat that failure as success.
 */
int
seafile_upgrade_repo_pwd_hash_algorithm (const char *repo_id,
                                         const char *user,
                                         const char *passwd,
                                         const char *pwd_hash_algo,
                                         const char *pwd_hash_params,
                                         GError **error)
{
    SeafRepo *repo = NULL;
    SeafCommit *commit = NULL, *parent = NULL;
    int ret = 0;

    if (!user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "No user given");
        return -1;
    }

    if (!passwd || passwd[0] == 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Empty passwd");
        return -1;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    if (!pwd_hash_algo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid pwd hash algorithm");
        return -1;
    }
    if (g_strcmp0 (pwd_hash_algo, PWD_HASH_PDKDF2) != 0 &&
        g_strcmp0 (pwd_hash_algo, PWD_HASH_ARGON2ID) != 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Unsupported pwd hash algorithm");
        return -1;
    }
    if (!pwd_hash_params) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid pwd hash params");
        return -1;
    }

retry:
    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "No such library");
        return -1;
    }

    /* Nothing to do if the repo already uses the requested scheme. */
    if (g_strcmp0 (pwd_hash_algo, repo->pwd_hash_algo) == 0 &&
        g_strcmp0 (pwd_hash_params, repo->pwd_hash_params) == 0) {
        goto out;
    }

    if (!repo->encrypted) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Repo not encrypted");
        ret = -1;
        goto out;
    }

    /* Verify the password against whichever scheme the repo currently uses. */
    if (repo->pwd_hash_algo) {
        if (seafile_pwd_hash_verify_repo_passwd (repo->enc_version, repo_id, passwd, repo->salt,
                                                 repo->pwd_hash, repo->pwd_hash_algo, repo->pwd_hash_params) < 0) {
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Incorrect password");
            ret = -1;   /* was: ret = 1 — inconsistent with every other error path */
            goto out;
        }
    } else {
        if (seafile_verify_repo_passwd (repo_id, passwd, repo->magic,
                                        repo->enc_version, repo->salt) < 0) {
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Incorrect password");
            ret = -1;
            goto out;
        }
    }

    parent = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                             repo->id, repo->version,
                                             repo->head->commit_id);
    if (!parent) {
        seaf_warning ("Failed to get commit %s:%s.\n",
                      repo->id, repo->head->commit_id);
        ret = -1;
        goto out;
    }

    char new_pwd_hash[65]= {0};
    seafile_generate_pwd_hash (repo->enc_version, repo_id, passwd, repo->salt,
                               pwd_hash_algo, pwd_hash_params, new_pwd_hash);

    // To prevent clients that have already synced this repo from overwriting the modified encryption algorithm,
    // delete all sync tokens.
    if (seaf_delete_repo_tokens (repo) < 0) {
        seaf_warning ("Failed to delete repo sync tokens, abort change pwd hash algorithm.\n");
        ret = -1;
        goto out;
    }

    memcpy (repo->pwd_hash, new_pwd_hash, 64);

    commit = seaf_commit_new (NULL,
                              repo->id,
                              parent->root_id,
                              user,
                              EMPTY_SHA1,
                              "Changed library password hash algorithm",
                              0);
    commit->parent_id = g_strdup(parent->commit_id);
    set_pwd_hash_to_commit (commit, repo, new_pwd_hash, pwd_hash_algo, pwd_hash_params);

    if (seaf_commit_manager_add_commit (seaf->commit_mgr, commit) < 0) {
        ret = -1;
        goto out;
    }

    seaf_branch_set_commit (repo->head, commit->commit_id);
    if (seaf_branch_manager_test_and_update_branch (seaf->branch_mgr,
                                                    repo->head,
                                                    parent->commit_id,
                                                    FALSE, NULL, NULL, NULL) < 0) {
        /* Head moved concurrently: drop everything and retry from scratch. */
        seaf_repo_unref (repo);
        seaf_commit_unref (commit);
        seaf_commit_unref (parent);
        repo = NULL;
        commit = NULL;
        parent = NULL;
        goto retry;
    }

    /* Keep any cached decryption password in sync. */
    if (seaf_passwd_manager_is_passwd_set (seaf->passwd_mgr, repo_id, user))
        seaf_passwd_manager_set_passwd (seaf->passwd_mgr, repo_id,
                                        user, passwd, error);

out:
    seaf_commit_unref (commit);
    seaf_commit_unref (parent);
    seaf_repo_unref (repo);

    return ret;
}
/*
 * Return 1 if `email` owns repo `repo_id`, 0 otherwise (including on any
 * error: bad arguments, unknown repo, or different owner).
 *
 * Fix: guard against NULL `email`/`repo_id` — the original passed `email`
 * straight into strcmp(), crashing on NULL.
 */
int
seafile_is_repo_owner (const char *email,
                       const char *repo_id,
                       GError **error)
{
    if (!email || !repo_id) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Argument should not be null");
        return 0;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return 0;
    }

    char *owner = seaf_repo_manager_get_repo_owner (seaf->repo_mgr, repo_id);
    if (!owner) {
        /* Unknown repo or no owner record: not an error for this query. */
        return 0;
    }

    int is_owner = (strcmp (owner, email) == 0);
    g_free (owner);

    return is_owner ? 1 : 0;
}
/* Assign `email` as the owner of repo `repo_id`.
 * Returns 0 on success, -1 on bad arguments or manager failure. */
int
seafile_set_repo_owner(const char *repo_id, const char *email,
                       GError **error)
{
    if (repo_id == NULL || email == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return -1;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    return seaf_repo_manager_set_repo_owner(seaf->repo_mgr, repo_id, email);
}
/* Look up the owner of repo `repo_id`.
 * Returns a newly-allocated owner string (caller frees), or NULL on bad
 * arguments or when no owner record exists. */
char *
seafile_get_repo_owner (const char *repo_id, GError **error)
{
    if (repo_id == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    /* A NULL result (no owner found) is not reported as an error. */
    return seaf_repo_manager_get_repo_owner (seaf->repo_mgr, repo_id);
}
/* List repos that have no owner.  Returns a list of SeafileRepo GObjects;
 * the intermediate SeafRepo list is unreffed and freed here. */
GList *
seafile_get_orphan_repo_list(GError **error)
{
    GList *repos, *iter;
    GList *converted;

    repos = seaf_repo_manager_get_orphan_repo_list(seaf->repo_mgr);
    converted = convert_repo_list (repos);

    for (iter = repos; iter != NULL; iter = iter->next)
        seaf_repo_unref ((SeafRepo *)iter->data);
    g_list_free (repos);

    return converted;
}
/* List repos owned by `email`, optionally including corrupted ones, with
 * paging via `start`/`limit`.  Returns SeafileRepo GObjects; the
 * intermediate SeafRepo list is unreffed and freed here. */
GList *
seafile_list_owned_repos (const char *email, int ret_corrupted,
                          int start, int limit, GError **error)
{
    GList *repos, *iter;
    GList *converted;

    repos = seaf_repo_manager_get_repos_by_owner (seaf->repo_mgr, email, ret_corrupted,
                                                  start, limit, NULL);
    converted = convert_repo_list (repos);

    for (iter = repos; iter != NULL; iter = iter->next)
        seaf_repo_unref ((SeafRepo *)iter->data);
    g_list_free (repos);

    return converted;
}
/* Search repos whose name matches `name`.  Returns SeafileRepo GObjects in
 * reversed (manager-original) order; the intermediate SeafRepo list is
 * unreffed and freed here. */
GList *
seafile_search_repos_by_name (const char *name, GError **error)
{
    GList *repos, *iter;
    GList *converted;

    repos = seaf_repo_manager_search_repos_by_name (seaf->repo_mgr, name);
    converted = convert_repo_list (repos);

    for (iter = repos; iter != NULL; iter = iter->next)
        seaf_repo_unref ((SeafRepo *)iter->data);
    g_list_free (repos);

    return g_list_reverse(converted);
}
/* Total storage used by `email`, in bytes.  Returns -1 on bad arguments or
 * internal failure. */
gint64
seafile_get_user_quota_usage (const char *email, GError **error)
{
    gint64 usage;

    if (email == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad user id");
        return -1;
    }

    usage = seaf_quota_manager_get_user_usage (seaf->quota_mgr, email);
    if (usage < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Internal server error");
        return -1;
    }

    return usage;
}
/* Storage attributed to `email` through shares, in bytes.  Returns -1 on
 * bad arguments or internal failure. */
gint64
seafile_get_user_share_usage (const char *email, GError **error)
{
    gint64 usage;

    if (email == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad user id");
        return -1;
    }

    usage = seaf_quota_manager_get_user_share_usage (seaf->quota_mgr, email);
    if (usage < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Internal server error");
        return -1;
    }

    return usage;
}
/*
 * Total size of repo `repo_id`, in bytes.  Returns -1 on bad arguments or
 * internal failure.
 *
 * Fix: the original validated the id twice — a strlen()==36 check followed
 * by is_uuid_valid(), which already implies the length; collapsed into a
 * single NULL + uuid check.
 */
gint64
seafile_server_repo_size(const char *repo_id, GError **error)
{
    gint64 size;

    if (!repo_id || !is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    size = seaf_repo_manager_get_repo_size (seaf->repo_mgr, repo_id);
    if (size < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Internal server error");
        return -1;
    }

    return size;
}
/*
 * Set the history retention limit (in days) for repo `repo_id`.
 * Returns 0 on success, -1 on bad arguments or DB failure.
 *
 * Fix: removed a literally duplicated is_uuid_valid() check (same
 * condition, same error message) left over from copy-paste.
 */
int
seafile_set_repo_history_limit (const char *repo_id,
                                int days,
                                GError **error)
{
    if (!repo_id || !is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    if (seaf_repo_manager_set_repo_history_limit (seaf->repo_mgr,
                                                  repo_id,
                                                  days) < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, "DB Error");
        return -1;
    }

    return 0;
}
/*
 * Get the history retention limit (in days) for repo `repo_id`.
 * Returns the manager's value, or -1 on bad arguments.
 *
 * Fix: removed a literally duplicated is_uuid_valid() check (same
 * condition, same error message) left over from copy-paste.
 */
int
seafile_get_repo_history_limit (const char *repo_id,
                                GError **error)
{
    if (!repo_id || !is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    return seaf_repo_manager_get_repo_history_limit (seaf->repo_mgr, repo_id);
}
/* RPC wrapper: set the earliest valid history timestamp for repo `repo_id`.
 * Delegates directly to the repo manager; note no argument validation is
 * done here, unlike the sibling history-limit RPCs. */
int
seafile_set_repo_valid_since (const char *repo_id,
                              gint64 timestamp,
                              GError **error)
{
    return seaf_repo_manager_set_repo_valid_since (seaf->repo_mgr,
                                                   repo_id,
                                                   timestamp);
}
/* Set a repo's access property to one of "public", "own" or "private".
 * Returns 0 on success, -1 on bad arguments or internal failure. */
int
seafile_repo_set_access_property (const char *repo_id, const char *ap, GError **error)
{
    if (repo_id == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return -1;
    }

    if (strlen(repo_id) != 36) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Wrong repo id");
        return -1;
    }

    /* g_strcmp0 tolerates a NULL `ap`, so this also rejects NULL. */
    gboolean known_ap = (g_strcmp0(ap, "public") == 0 ||
                         g_strcmp0(ap, "own") == 0 ||
                         g_strcmp0(ap, "private") == 0);
    if (!known_ap) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Wrong access property");
        return -1;
    }

    int rc = seaf_repo_manager_set_access_property (seaf->repo_mgr, repo_id, ap);
    if (rc < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Internal server error");
        return -1;
    }

    return rc;
}
/* Query a repo's access property.  Returns a newly-allocated string
 * (caller frees), or NULL on bad arguments or if none is recorded. */
char *
seafile_repo_query_access_property (const char *repo_id, GError **error)
{
    if (repo_id == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return NULL;
    }

    if (strlen(repo_id) != 36) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Wrong repo id");
        return NULL;
    }

    return seaf_repo_manager_query_access_property (seaf->repo_mgr, repo_id);
}
/* Issue a web access token for `op` on object `obj_id` in repo `repo_id`,
 * on behalf of `username`.  `use_onetime` makes the token single-use.
 * Returns a newly-allocated token string, or NULL on error. */
char *
seafile_web_get_access_token (const char *repo_id,
                              const char *obj_id,
                              const char *op,
                              const char *username,
                              int use_onetime,
                              GError **error)
{
    if (!repo_id || !obj_id || !op || !username) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Missing args");
        return NULL;
    }

    return seaf_web_at_manager_get_access_token (seaf->web_at_mgr,
                                                 repo_id, obj_id, op,
                                                 username, use_onetime, error);
}
/* Resolve a web access token to its SeafileWebAccess record.
 * Returns the record as a GObject, or NULL if the token is missing/unknown. */
GObject *
seafile_web_query_access_token (const char *token, GError **error)
{
    if (token == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Token should not be null");
        return NULL;
    }

    SeafileWebAccess *webaccess =
        seaf_web_at_manager_query_access_token (seaf->web_at_mgr, token);

    return webaccess ? (GObject *)webaccess : NULL;
}
/* Report progress of a zip-download task identified by `token`.
 * Only functional when built with evhtp (the embedded file server);
 * otherwise always returns NULL. */
char *
seafile_query_zip_progress (const char *token, GError **error)
{
#ifdef HAVE_EVHTP
    return zip_download_mgr_query_zip_progress (seaf->zip_download_mgr,
                                                token, error);
#else
    return NULL;
#endif
}
/* Cancel a running zip-download task identified by `token`.
 * Only functional when built with evhtp; otherwise a no-op returning 0. */
int
seafile_cancel_zip_task (const char *token, GError **error)
{
#ifdef HAVE_EVHTP
    return zip_download_mgr_cancel_zip_task (seaf->zip_download_mgr,
                                             token);
#else
    return 0;
#endif
}
/* Share repo `repo_id` from `from_email` to `to_email` with `permission`.
 * Returns the share manager's result, or -1 on bad arguments. */
int
seafile_add_share (const char *repo_id, const char *from_email,
                   const char *to_email, const char *permission, GError **error)
{
    if (!repo_id || !from_email || !to_email || !permission) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Missing args");
        return -1;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid repo_id parameter");
        return -1;
    }

    /* Self-sharing is meaningless. */
    if (g_strcmp0 (from_email, to_email) == 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Can not share repo to myself");
        return -1;
    }

    if (!is_permission_valid (permission)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid permission parameter");
        return -1;
    }

    return seaf_share_manager_add_share (seaf->share_mgr, repo_id, from_email,
                                         to_email, permission);
}
/* List repos shared by (`type` == "from_email") or to (`type` ==
 * "to_email") the given user, with paging.  Any other `type` is rejected. */
GList *
seafile_list_share_repos (const char *email, const char *type,
                          int start, int limit, GError **error)
{
    gboolean valid_type = (g_strcmp0 (type, "from_email") == 0 ||
                           g_strcmp0 (type, "to_email") == 0);
    if (!valid_type) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Wrong type argument");
        return NULL;
    }

    return seaf_share_manager_list_share_repos (seaf->share_mgr,
                                                email, type,
                                                start, limit,
                                                NULL);
}
/* List the users that `from_user` has shared repo `repo_id` to. */
GList *
seafile_list_repo_shared_to (const char *from_user, const char *repo_id,
                             GError **error)
{
    if (from_user == NULL || repo_id == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Missing args");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    return seaf_share_manager_list_repo_shared_to (seaf->share_mgr,
                                                   from_user, repo_id,
                                                   error);
}
/* Share the subdirectory `path` of repo `repo_id` to `share_user` by
 * creating a virtual repo rooted at that path and sharing it.
 * Returns the new virtual repo id (caller frees), or NULL on error. */
char *
seafile_share_subdir_to_user (const char *repo_id,
                              const char *path,
                              const char *owner,
                              const char *share_user,
                              const char *permission,
                              const char *passwd,
                              GError **error)
{
    /* Argument validation: ids, a real subdirectory (not the root), and
     * distinct owner/recipient. */
    if (is_empty_string (repo_id) || !is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid repo_id parameter");
        return NULL;
    }
    if (is_empty_string (path) || strcmp (path, "/") == 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid path parameter");
        return NULL;
    }
    if (is_empty_string (owner)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid owner parameter");
        return NULL;
    }
    if (is_empty_string (share_user)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid share_user parameter");
        return NULL;
    }
    if (strcmp (owner, share_user) == 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Can't share subdir to myself");
        return NULL;
    }
    if (!is_permission_valid (permission)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid permission parameter");
        return NULL;
    }

    char *ret = NULL;
    char *real_path = format_dir_path (path);
    // Use subdir name as virtual repo name and description
    char *vrepo_name = g_path_get_basename (real_path);

    char *vrepo_id = seaf_repo_manager_create_virtual_repo (seaf->repo_mgr,
                                                            repo_id, real_path,
                                                            vrepo_name, vrepo_name,
                                                            owner, passwd, error);
    if (vrepo_id == NULL)
        goto out;

    if (seaf_share_manager_add_share (seaf->share_mgr, vrepo_id, owner,
                                      share_user, permission) < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Failed to share subdir to user");
        g_free (vrepo_id);
    } else {
        ret = vrepo_id;
    }

out:
    g_free (vrepo_name);
    g_free (real_path);

    return ret;
}
/* Remove a previous subdir share from `owner` to `share_user`.
 * Returns 0 on success, -1 on bad arguments or manager failure. */
int
seafile_unshare_subdir_for_user (const char *repo_id,
                                 const char *path,
                                 const char *owner,
                                 const char *share_user,
                                 GError **error)
{
    if (is_empty_string (repo_id) || !is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid repo_id parameter");
        return -1;
    }
    if (is_empty_string (path) || strcmp (path, "/") == 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid path parameter");
        return -1;
    }
    if (is_empty_string (owner)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid owner parameter");
        return -1;
    }
    /* The recipient must be a real user distinct from the owner. */
    if (is_empty_string (share_user) ||
        strcmp (owner, share_user) == 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid share_user parameter");
        return -1;
    }

    char *real_path = format_dir_path (path);
    int rc = seaf_share_manager_unshare_subdir (seaf->share_mgr,
                                                repo_id, real_path, owner, share_user);
    if (rc < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Failed to unshare subdir for user");
    }
    g_free (real_path);

    return rc;
}
/* Change the permission of an existing subdir share to `share_user`.
 * Returns 0 on success, -1 on bad arguments or manager failure. */
int
seafile_update_share_subdir_perm_for_user (const char *repo_id,
                                           const char *path,
                                           const char *owner,
                                           const char *share_user,
                                           const char *permission,
                                           GError **error)
{
    if (is_empty_string (repo_id) || !is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid repo_id parameter");
        return -1;
    }
    if (is_empty_string (path) || strcmp (path, "/") == 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid path parameter");
        return -1;
    }
    if (is_empty_string (owner)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid owner parameter");
        return -1;
    }
    /* The recipient must be a real user distinct from the owner. */
    if (is_empty_string (share_user) ||
        strcmp (owner, share_user) == 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid share_user parameter");
        return -1;
    }
    if (!is_permission_valid (permission)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid permission parameter");
        return -1;
    }

    char *real_path = format_dir_path (path);
    int rc = seaf_share_manager_set_subdir_perm_by_path (seaf->share_mgr,
                                                         repo_id, owner, share_user,
                                                         permission, real_path);
    if (rc < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Failed to update share subdir permission for user");
    }
    g_free (real_path);

    return rc;
}
/* List the groups that `from_user` has shared repo `repo_id` to. */
GList *
seafile_list_repo_shared_group (const char *from_user, const char *repo_id,
                                GError **error)
{
    if (from_user == NULL || repo_id == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Missing args");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    return seaf_share_manager_list_repo_shared_group (seaf->share_mgr,
                                                      from_user, repo_id,
                                                      error);
}
/* Remove a user-to-user share of repo `repo_id`.
 * Returns the share manager's result, or -1 on missing arguments. */
int
seafile_remove_share (const char *repo_id, const char *from_email,
                      const char *to_email, GError **error)
{
    if (!repo_id || !from_email || !to_email) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Missing args");
        return -1;
    }

    return seaf_share_manager_remove_share (seaf->share_mgr, repo_id, from_email,
                                            to_email);
}
/* Group repo RPC. */
/* Share repo `repo_id` to group `group_id` on behalf of `user_name`.
 * Returns the repo manager's result, or -1 on bad arguments. */
int
seafile_group_share_repo (const char *repo_id, int group_id,
                          const char *user_name, const char *permission,
                          GError **error)
{
    if (group_id <= 0 || !user_name || !repo_id || !permission) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad input argument");
        return -1;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    if (!is_permission_valid (permission)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid permission parameter");
        return -1;
    }

    return seaf_repo_manager_add_group_repo (seaf->repo_mgr, repo_id, group_id,
                                             user_name, permission, error);
}
/* Remove a group share of repo `repo_id` from group `group_id`.
 * Returns the repo manager's result, or -1 on bad arguments. */
int
seafile_group_unshare_repo (const char *repo_id, int group_id,
                            const char *user_name, GError **error)
{
    if (user_name == NULL || repo_id == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "User name and repo id can not be NULL");
        return -1;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    return seaf_repo_manager_del_group_repo (seaf->repo_mgr, repo_id, group_id, error);
}
/* Share the subdirectory `path` of repo `repo_id` to group `share_group`
 * by creating a virtual repo rooted at that path and sharing it.
 * Returns the new virtual repo id (caller frees), or NULL on error. */
char *
seafile_share_subdir_to_group (const char *repo_id,
                               const char *path,
                               const char *owner,
                               int share_group,
                               const char *permission,
                               const char *passwd,
                               GError **error)
{
    /* Argument validation: ids, a real subdirectory (not the root), and a
     * non-negative group id. */
    if (is_empty_string (repo_id) || !is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid repo_id parameter");
        return NULL;
    }
    if (is_empty_string (path) || strcmp (path, "/") == 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid path parameter");
        return NULL;
    }
    if (is_empty_string (owner)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid owner parameter");
        return NULL;
    }
    if (share_group < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid share_group parameter");
        return NULL;
    }
    if (!is_permission_valid (permission)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid permission parameter");
        return NULL;
    }

    char *ret = NULL;
    char *real_path = format_dir_path (path);
    // Use subdir name as virtual repo name and description
    char *vrepo_name = g_path_get_basename (real_path);

    char *vrepo_id = seaf_repo_manager_create_virtual_repo (seaf->repo_mgr,
                                                            repo_id, real_path,
                                                            vrepo_name, vrepo_name,
                                                            owner, passwd, error);
    if (vrepo_id == NULL)
        goto out;

    if (seaf_repo_manager_add_group_repo (seaf->repo_mgr, vrepo_id, share_group,
                                          owner, permission, error) < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Failed to share subdir to group");
        g_free (vrepo_id);
    } else {
        ret = vrepo_id;
    }

out:
    g_free (vrepo_name);
    g_free (real_path);

    return ret;
}
/* Remove a previous subdir share to group `share_group`.
 * Returns 0 on success, -1 on bad arguments or manager failure. */
int
seafile_unshare_subdir_for_group (const char *repo_id,
                                  const char *path,
                                  const char *owner,
                                  int share_group,
                                  GError **error)
{
    if (is_empty_string (repo_id) || !is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid repo_id parameter");
        return -1;
    }
    if (is_empty_string (path) || strcmp (path, "/") == 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid path parameter");
        return -1;
    }
    if (is_empty_string (owner)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid owner parameter");
        return -1;
    }
    if (share_group < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid share_group parameter");
        return -1;
    }

    char *real_path = format_dir_path (path);
    int rc = seaf_share_manager_unshare_group_subdir (seaf->share_mgr, repo_id,
                                                      real_path, owner, share_group);
    if (rc < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Failed to unshare subdir for group");
    }
    g_free (real_path);

    return rc;
}
/* Change the permission of an existing subdir share to group `share_group`.
 * Returns 0 on success, -1 on bad arguments or manager failure. */
int
seafile_update_share_subdir_perm_for_group (const char *repo_id,
                                            const char *path,
                                            const char *owner,
                                            int share_group,
                                            const char *permission,
                                            GError **error)
{
    if (is_empty_string (repo_id) || !is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid repo_id parameter");
        return -1;
    }
    if (is_empty_string (path) || strcmp (path, "/") == 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid path parameter");
        return -1;
    }
    if (is_empty_string (owner)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid owner parameter");
        return -1;
    }
    if (share_group < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid share_group parameter");
        return -1;
    }
    if (!is_permission_valid (permission)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid permission parameter");
        return -1;
    }

    char *real_path = format_dir_path (path);
    int rc = seaf_repo_manager_set_subdir_group_perm_by_path (seaf->repo_mgr,
                                                              repo_id, owner, share_group,
                                                              permission, real_path);
    if (rc < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Failed to update share subdir permission for group");
    }
    g_free (real_path);

    return rc;
}
/* List the ids of groups that repo `repo_id` is shared to, as a
 * newline-separated decimal string (caller frees).  Returns NULL on bad
 * arguments or when there are no group shares. */
char *
seafile_get_shared_groups_by_repo(const char *repo_id, GError **error)
{
    SeafRepoManager *mgr = seaf->repo_mgr;
    GList *group_ids, *iter;
    GString *buf;

    if (repo_id == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    group_ids = seaf_repo_manager_get_groups_by_repo (mgr, repo_id, error);
    if (group_ids == NULL)
        return NULL;

    buf = g_string_new("");
    /* Group ids are stored directly in the list pointers. */
    for (iter = group_ids; iter != NULL; iter = iter->next)
        g_string_append_printf (buf, "%d\n", (int)(long)iter->data);

    g_list_free (group_ids);
    return g_string_free (buf, FALSE);
}
/* List the ids of repos shared to group `group_id`, as a newline-separated
 * string (caller frees).  Returns NULL when there are none. */
char *
seafile_get_group_repoids (int group_id, GError **error)
{
    SeafRepoManager *mgr = seaf->repo_mgr;
    GList *repo_ids, *iter;
    GString *buf;

    repo_ids = seaf_repo_manager_get_group_repoids (mgr, group_id, error);
    if (repo_ids == NULL)
        return NULL;

    buf = g_string_new("");
    for (iter = repo_ids; iter != NULL; iter = iter->next) {
        g_string_append_printf (buf, "%s\n", (char *)iter->data);
        g_free (iter->data);
    }

    g_list_free (repo_ids);
    return g_string_free (buf, FALSE);
}
/* List the repos shared to group `group_id`.  Returns NULL on a negative
 * group id or when the manager reports none. */
GList *
seafile_get_repos_by_group (int group_id, GError **error)
{
    if (group_id < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid group id.");
        return NULL;
    }

    return seaf_repo_manager_get_repos_by_group (seaf->repo_mgr, group_id, error);
}
/* List the group-shared repos owned by `user`, in reversed
 * (manager-original) order.  Returns NULL on bad arguments or none found. */
GList *
seafile_get_group_repos_by_owner (char *user, GError **error)
{
    GList *repos;

    if (user == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "user name can not be NULL");
        return NULL;
    }

    repos = seaf_repo_manager_get_group_repos_by_owner (seaf->repo_mgr, user, error);
    if (repos == NULL)
        return NULL;

    return g_list_reverse (repos);
}
/* Return the email of the user who shared this repo to a group, or an
 * empty string if there is no record.  Caller owns the returned buffer.
 * Returns NULL on invalid repo id. */
char *
seafile_get_group_repo_owner (const char *repo_id, GError **error)
{
    SeafRepoManager *mgr = seaf->repo_mgr;

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    /* Allocate the result only after validation: the previous version
     * created the GString first and leaked it on the error return above. */
    GString *result = g_string_new ("");
    char *share_from = seaf_repo_manager_get_group_repo_owner (mgr, repo_id,
                                                               error);
    if (share_from) {
        g_string_append_printf (result, "%s", share_from);
        g_free (share_from);
    }

    return g_string_free (result, FALSE);
}
int
seafile_remove_repo_group(int group_id, const char *username, GError **error)
{
if (group_id <= 0) {
g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
"Wrong group id argument");
return -1;
}
return seaf_repo_manager_remove_group_repos (seaf->repo_mgr,
group_id, username,
error);
}
/* Inner public repo RPC */

/* Mark a repo as public inside the organization, with the given
 * permission string.  Returns 0 on success, -1 on failure. */
int
seafile_set_inner_pub_repo (const char *repo_id,
                            const char *permission,
                            GError **error)
{
    if (!repo_id) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad args");
        return -1;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    int rc = seaf_repo_manager_set_inner_pub_repo (seaf->repo_mgr,
                                                   repo_id, permission);
    if (rc < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Internal error");
        return -1;
    }

    return 0;
}
/* Remove the inner-public flag from a repo.
 * Returns 0 on success, -1 on failure. */
int
seafile_unset_inner_pub_repo (const char *repo_id, GError **error)
{
    if (!repo_id) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad args");
        return -1;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    int rc = seaf_repo_manager_unset_inner_pub_repo (seaf->repo_mgr, repo_id);
    if (rc < 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Internal error");
        return -1;
    }

    return 0;
}
/* List all inner-public repos of the organization. */
GList *
seafile_list_inner_pub_repos (GError **error)
{
    GList *repos = seaf_repo_manager_list_inner_pub_repos (seaf->repo_mgr,
                                                           NULL);
    return repos;
}
/* Count the inner-public repos of the organization. */
gint64
seafile_count_inner_pub_repos (GError **error)
{
    gint64 n = seaf_repo_manager_count_inner_pub_repos (seaf->repo_mgr);
    return n;
}
/* List the inner-public repos owned by a user.  Returns NULL on error. */
GList *
seafile_list_inner_pub_repos_by_owner (const char *user, GError **error)
{
    if (!user) {
        /* Use SEAFILE_DOMAIN: a 0 error domain is invalid for GError and
         * inconsistent with the rest of this file. */
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments");
        return NULL;
    }

    return seaf_repo_manager_list_inner_pub_repos_by_owner (seaf->repo_mgr, user);
}
/* Return non-zero if the repo is inner-public, 0 if not, -1 on bad args. */
int
seafile_is_inner_pub_repo (const char *repo_id, GError **error)
{
    if (!repo_id) {
        /* Use SEAFILE_DOMAIN: a 0 error domain is invalid for GError and
         * inconsistent with the rest of this file. */
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments");
        return -1;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    return seaf_repo_manager_is_inner_pub_repo (seaf->repo_mgr, repo_id);
}
/* Return the size in bytes of the file object, or -1 on error. */
gint64
seafile_get_file_size (const char *store_id, int version,
                       const char *file_id, GError **error)
{
    gint64 size;

    if (!store_id || !is_uuid_valid (store_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid store id");
        return -1;
    }
    if (!file_id || !is_object_id_valid (file_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid file id");
        return -1;
    }

    size = seaf_fs_manager_get_file_size (seaf->fs_mgr, store_id, version,
                                          file_id);
    if (size >= 0)
        return size;

    g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL,
                 "failed to read file size");
    return -1;
}
/* Return the total size in bytes of a directory tree, or -1 on error. */
gint64
seafile_get_dir_size (const char *store_id, int version,
                      const char *dir_id, GError **error)
{
    gint64 dir_size;

    if (!store_id || !is_uuid_valid (store_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid store id");
        return -1;
    }
    if (!dir_id || !is_object_id_valid (dir_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid dir id");
        return -1;
    }

    dir_size = seaf_fs_manager_get_fs_size (seaf->fs_mgr, store_id, version,
                                            dir_id);
    if (dir_size < 0) {
        /* Fixed typo in the error message ("caculate"). */
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Failed to calculate dir size");
        return -1;
    }

    return dir_size;
}
/* Verify the magic string of an encrypted repo against the stored one.
 * Returns 0 on success, -1 on failure. */
int
seafile_check_passwd (const char *repo_id,
                      const char *magic,
                      GError **error)
{
    if (!repo_id || strlen (repo_id) != 36 || !magic) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return -1;
    }

    int rc = seaf_passwd_manager_check_passwd (seaf->passwd_mgr,
                                               repo_id, magic, error);
    return rc < 0 ? -1 : 0;
}
/* Record a user's password for an encrypted repo.
 * Returns 0 on success, -1 on failure. */
int
seafile_set_passwd (const char *repo_id,
                    const char *user,
                    const char *passwd,
                    GError **error)
{
    if (!repo_id || strlen (repo_id) != 36 || !user || !passwd) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return -1;
    }

    int rc = seaf_passwd_manager_set_passwd (seaf->passwd_mgr,
                                             repo_id, user, passwd, error);
    return rc < 0 ? -1 : 0;
}
/* Forget a user's password for an encrypted repo.
 * Returns 0 on success, -1 on failure. */
int
seafile_unset_passwd (const char *repo_id,
                      const char *user,
                      GError **error)
{
    if (!repo_id || strlen (repo_id) != 36 || !user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return -1;
    }

    int rc = seaf_passwd_manager_unset_passwd (seaf->passwd_mgr,
                                               repo_id, user, error);
    return rc < 0 ? -1 : 0;
}
/* Return whether the user has supplied a password for the repo;
 * -1 on bad arguments. */
int
seafile_is_passwd_set (const char *repo_id, const char *user, GError **error)
{
    if (!repo_id || strlen (repo_id) != 36 || !user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return -1;
    }

    return seaf_passwd_manager_is_passwd_set (seaf->passwd_mgr,
                                              repo_id, user);
}
/* Return the cached decryption key for (repo, user), or NULL with *error
 * set if the user has not supplied a password. */
GObject *
seafile_get_decrypt_key (const char *repo_id, const char *user, GError **error)
{
    SeafileCryptKey *key;

    if (!repo_id || strlen (repo_id) != 36 || !user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return NULL;
    }

    key = seaf_passwd_manager_get_decrypt_key (seaf->passwd_mgr,
                                               repo_id, user);
    if (key)
        return (GObject *)key;

    g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                 "Password was not set");
    return NULL;
}
/* Revert the repo's head to an earlier commit, on behalf of user_name.
 * Returns -1 on bad arguments or failure. */
int
seafile_revert_on_server (const char *repo_id,
                          const char *commit_id,
                          const char *user_name,
                          GError **error)
{
    gboolean args_ok = repo_id && strlen (repo_id) == 36 &&
                       commit_id && strlen (commit_id) == 40 &&
                       user_name != NULL;

    if (!args_ok) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return -1;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }
    if (!is_object_id_valid (commit_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid commit id");
        return -1;
    }

    return seaf_repo_manager_revert_on_server (seaf->repo_mgr,
                                               repo_id,
                                               commit_id,
                                               user_name,
                                               error);
}
/* Add a file, staged at temp_file_path, as parent_dir/file_name.
 * Returns 0 on success, -1 on failure with *error set. */
int
seafile_post_file (const char *repo_id, const char *temp_file_path,
                   const char *parent_dir, const char *file_name,
                   const char *user,
                   GError **error)
{
    char *canon_dir = NULL, *canon_name = NULL, *dir_path = NULL;
    int ret = 0;

    if (!repo_id || !temp_file_path || !parent_dir || !file_name || !user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Argument should not be null");
        return -1;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    /* Canonicalize the directory, then the file name; bail on bad UTF-8. */
    canon_dir = normalize_utf8_path (parent_dir);
    if (!canon_dir) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        ret = -1;
        goto out;
    }
    canon_name = normalize_utf8_path (file_name);
    if (!canon_name) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        ret = -1;
        goto out;
    }

    dir_path = format_dir_path (canon_dir);
    if (seaf_repo_manager_post_file (seaf->repo_mgr, repo_id,
                                     temp_file_path, dir_path,
                                     canon_name, user,
                                     error) < 0)
        ret = -1;

out:
    g_free (canon_dir);
    g_free (canon_name);
    g_free (dir_path);
    return ret;
}
/* char * */
/* seafile_post_file_blocks (const char *repo_id, */
/* const char *parent_dir, */
/* const char *file_name, */
/* const char *blockids_json, */
/* const char *paths_json, */
/* const char *user, */
/* gint64 file_size, */
/* int replace_existed, */
/* GError **error) */
/* { */
/* char *norm_parent_dir = NULL, *norm_file_name = NULL, *rpath = NULL; */
/* char *new_id = NULL; */
/* if (!repo_id || !parent_dir || !file_name */
/* || !blockids_json || ! paths_json || !user || file_size < 0) { */
/* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */
/* "Argument should not be null"); */
/* return NULL; */
/* } */
/* if (!is_uuid_valid (repo_id)) { */
/* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); */
/* return NULL; */
/* } */
/* norm_parent_dir = normalize_utf8_path (parent_dir); */
/* if (!norm_parent_dir) { */
/* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */
/* "Path is in valid UTF8 encoding"); */
/* goto out; */
/* } */
/* norm_file_name = normalize_utf8_path (file_name); */
/* if (!norm_file_name) { */
/* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */
/* "Path is in valid UTF8 encoding"); */
/* goto out; */
/* } */
/* rpath = format_dir_path (norm_parent_dir); */
/* seaf_repo_manager_post_file_blocks (seaf->repo_mgr, */
/* repo_id, */
/* rpath, */
/* norm_file_name, */
/* blockids_json, */
/* paths_json, */
/* user, */
/* file_size, */
/* replace_existed, */
/* &new_id, */
/* error); */
/* out: */
/* g_free (norm_parent_dir); */
/* g_free (norm_file_name); */
/* g_free (rpath); */
/* return new_id; */
/* } */
/* Add multiple staged files under parent_dir in one commit.  File names
 * and temp paths arrive as JSON arrays.  Returns a JSON string describing
 * the new entries (caller frees), or NULL on error. */
char *
seafile_post_multi_files (const char *repo_id,
                          const char *parent_dir,
                          const char *filenames_json,
                          const char *paths_json,
                          const char *user,
                          int replace_existed,
                          GError **error)
{
    char *canon_dir = NULL, *dir_path = NULL;
    char *ret_json = NULL;

    if (!repo_id || !filenames_json || !parent_dir || !paths_json || !user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Argument should not be null");
        return NULL;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    canon_dir = normalize_utf8_path (parent_dir);
    if (!canon_dir) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        goto out;
    }

    dir_path = format_dir_path (canon_dir);
    seaf_repo_manager_post_multi_files (seaf->repo_mgr,
                                        repo_id,
                                        dir_path,
                                        filenames_json,
                                        paths_json,
                                        user,
                                        replace_existed,
                                        0,
                                        &ret_json,
                                        NULL,
                                        error);

out:
    g_free (canon_dir);
    g_free (dir_path);
    return ret_json;
}
/* Replace the contents of parent_dir/file_name with the staged temp file.
 * Returns the new file id (caller frees) or NULL on error. */
char *
seafile_put_file (const char *repo_id, const char *temp_file_path,
                  const char *parent_dir, const char *file_name,
                  const char *user, const char *head_id,
                  GError **error)
{
    char *canon_dir = NULL, *canon_name = NULL, *dir_path = NULL;
    char *new_file_id = NULL;

    if (!repo_id || !temp_file_path || !parent_dir || !file_name || !user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Argument should not be null");
        return NULL;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    canon_dir = normalize_utf8_path (parent_dir);
    if (!canon_dir) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        goto out;
    }
    canon_name = normalize_utf8_path (file_name);
    if (!canon_name) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        goto out;
    }

    dir_path = format_dir_path (canon_dir);
    seaf_repo_manager_put_file (seaf->repo_mgr, repo_id,
                                temp_file_path, dir_path,
                                canon_name, user, head_id,
                                0,
                                &new_file_id, error);

out:
    g_free (canon_dir);
    g_free (canon_name);
    g_free (dir_path);
    return new_file_id;
}
/* char * */
/* seafile_put_file_blocks (const char *repo_id, const char *parent_dir, */
/* const char *file_name, const char *blockids_json, */
/* const char *paths_json, const char *user, */
/* const char *head_id, gint64 file_size, GError **error) */
/* { */
/* char *norm_parent_dir = NULL, *norm_file_name = NULL, *rpath = NULL; */
/* char *new_file_id = NULL; */
/* if (!repo_id || !parent_dir || !file_name */
/* || !blockids_json || ! paths_json || !user) { */
/* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */
/* "Argument should not be null"); */
/* return NULL; */
/* } */
/* if (!is_uuid_valid (repo_id)) { */
/* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id"); */
/* return NULL; */
/* } */
/* norm_parent_dir = normalize_utf8_path (parent_dir); */
/* if (!norm_parent_dir) { */
/* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */
/* "Path is in valid UTF8 encoding"); */
/* goto out; */
/* } */
/* norm_file_name = normalize_utf8_path (file_name); */
/* if (!norm_file_name) { */
/* g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */
/* "Path is in valid UTF8 encoding"); */
/* goto out; */
/* } */
/* rpath = format_dir_path (norm_parent_dir); */
/* seaf_repo_manager_put_file_blocks (seaf->repo_mgr, repo_id, */
/* rpath, norm_file_name, */
/* blockids_json, paths_json, */
/* user, head_id, file_size, */
/* &new_file_id, error); */
/* out: */
/* g_free (norm_parent_dir); */
/* g_free (norm_file_name); */
/* g_free (rpath); */
/* return new_file_id; */
/* } */
/* Create an empty directory parent_dir/new_dir_name.
 * Returns 0 on success, -1 on failure with *error set. */
int
seafile_post_dir (const char *repo_id, const char *parent_dir,
                  const char *new_dir_name, const char *user,
                  GError **error)
{
    char *norm_parent_dir = NULL, *norm_dir_name = NULL, *rpath = NULL;
    int ret = 0;

    if (!repo_id || !parent_dir || !new_dir_name || !user) {
        /* Use SEAFILE_DOMAIN instead of 0; a 0 error domain is invalid
         * for GError and inconsistent with the rest of this file. */
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Argument should not be null");
        return -1;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    norm_parent_dir = normalize_utf8_path (parent_dir);
    if (!norm_parent_dir) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        ret = -1;
        goto out;
    }
    norm_dir_name = normalize_utf8_path (new_dir_name);
    if (!norm_dir_name) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        ret = -1;
        goto out;
    }

    rpath = format_dir_path (norm_parent_dir);
    if (seaf_repo_manager_post_dir (seaf->repo_mgr, repo_id,
                                    rpath, norm_dir_name,
                                    user, error) < 0) {
        ret = -1;
    }

out:
    g_free (norm_parent_dir);
    g_free (norm_dir_name);
    g_free (rpath);
    return ret;
}
/* Create an empty file parent_dir/new_file_name.
 * Returns 0 on success, -1 on failure with *error set. */
int
seafile_post_empty_file (const char *repo_id, const char *parent_dir,
                         const char *new_file_name, const char *user,
                         GError **error)
{
    char *norm_parent_dir = NULL, *norm_file_name = NULL, *rpath = NULL;
    int ret = 0;

    if (!repo_id || !parent_dir || !new_file_name || !user) {
        /* Use SEAFILE_DOMAIN instead of 0; a 0 error domain is invalid
         * for GError and inconsistent with the rest of this file. */
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Argument should not be null");
        return -1;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    norm_parent_dir = normalize_utf8_path (parent_dir);
    if (!norm_parent_dir) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        ret = -1;
        goto out;
    }
    norm_file_name = normalize_utf8_path (new_file_name);
    if (!norm_file_name) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        ret = -1;
        goto out;
    }

    rpath = format_dir_path (norm_parent_dir);
    if (seaf_repo_manager_post_empty_file (seaf->repo_mgr, repo_id,
                                           rpath, norm_file_name,
                                           user, error) < 0) {
        ret = -1;
    }

out:
    g_free (norm_parent_dir);
    g_free (norm_file_name);
    g_free (rpath);
    return ret;
}
/* Delete parent_dir/file_name on behalf of user.
 * Returns 0 on success, -1 on failure with *error set. */
int
seafile_del_file (const char *repo_id, const char *parent_dir,
                  const char *file_name, const char *user,
                  GError **error)
{
    char *norm_parent_dir = NULL, *norm_file_name = NULL, *rpath = NULL;
    int ret = 0;

    if (!repo_id || !parent_dir || !file_name || !user) {
        /* Use SEAFILE_DOMAIN instead of 0; a 0 error domain is invalid
         * for GError and inconsistent with the rest of this file. */
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Argument should not be null");
        return -1;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    norm_parent_dir = normalize_utf8_path (parent_dir);
    if (!norm_parent_dir) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        ret = -1;
        goto out;
    }
    norm_file_name = normalize_utf8_path (file_name);
    if (!norm_file_name) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        ret = -1;
        goto out;
    }

    rpath = format_dir_path (norm_parent_dir);
    if (seaf_repo_manager_del_file (seaf->repo_mgr, repo_id,
                                    rpath, norm_file_name,
                                    user, error) < 0) {
        ret = -1;
    }

out:
    g_free (norm_parent_dir);
    g_free (norm_file_name);
    g_free (rpath);
    return ret;
}
/* Delete a batch of files/dirs given as a path list in one commit.
 * Returns 0 on success, -1 on failure with *error set. */
int
seafile_batch_del_files (const char *repo_id,
                         const char *filepaths,
                         const char *user,
                         GError **error)
{
    char *norm_file_list = NULL;
    int ret = 0;

    if (!repo_id || !filepaths || !user) {
        /* Use SEAFILE_DOMAIN instead of 0; a 0 error domain is invalid
         * for GError and inconsistent with the rest of this file. */
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Argument should not be null");
        return -1;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    norm_file_list = normalize_utf8_path (filepaths);
    if (!norm_file_list) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        ret = -1;
        goto out;
    }

    if (seaf_repo_manager_batch_del_files (seaf->repo_mgr, repo_id,
                                           norm_file_list,
                                           user, error) < 0) {
        ret = -1;
    }

out:
    g_free (norm_file_list);
    return ret;
}
/* Copy one or more entries from src_repo/src_dir to dst_repo/dst_dir.
 * Returns a SeafileCopyResult object, or NULL on error with *error set. */
GObject *
seafile_copy_file (const char *src_repo_id,
                   const char *src_dir,
                   const char *src_filename,
                   const char *dst_repo_id,
                   const char *dst_dir,
                   const char *dst_filename,
                   const char *user,
                   int need_progress,
                   int synchronous,
                   GError **error)
{
    char *norm_src_dir = NULL, *norm_src_filename = NULL;
    char *norm_dst_dir = NULL, *norm_dst_filename = NULL;
    char *rsrc_dir = NULL, *rdst_dir = NULL;
    GObject *ret = NULL;

    if (!src_repo_id || !src_dir || !src_filename ||
        !dst_repo_id || !dst_dir || !dst_filename || !user) {
        /* Use SEAFILE_DOMAIN instead of 0; a 0 error domain is invalid
         * for GError and inconsistent with the rest of this file. */
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Argument should not be null");
        return NULL;
    }
    if (!is_uuid_valid (src_repo_id) || !is_uuid_valid(dst_repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    norm_src_dir = normalize_utf8_path (src_dir);
    if (!norm_src_dir) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        goto out;
    }
    norm_src_filename = normalize_utf8_path (src_filename);
    if (!norm_src_filename) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        goto out;
    }
    norm_dst_dir = normalize_utf8_path (dst_dir);
    if (!norm_dst_dir) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        goto out;
    }
    norm_dst_filename = normalize_utf8_path (dst_filename);
    if (!norm_dst_filename) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        goto out;
    }

    rsrc_dir = format_dir_path (norm_src_dir);
    rdst_dir = format_dir_path (norm_dst_dir);

    ret = (GObject *)seaf_repo_manager_copy_multiple_files (seaf->repo_mgr,
                                                            src_repo_id, rsrc_dir, norm_src_filename,
                                                            dst_repo_id, rdst_dir, norm_dst_filename,
                                                            user, need_progress, synchronous,
                                                            error);

out:
    g_free (norm_src_dir);
    g_free (norm_src_filename);
    g_free (norm_dst_dir);
    g_free (norm_dst_filename);
    g_free (rsrc_dir);
    g_free (rdst_dir);
    return ret;
}
/* Move one or more entries from src_repo/src_dir to dst_repo/dst_dir.
 * `replace` controls overwrite of existing destination entries.
 * Returns a SeafileCopyResult object, or NULL on error with *error set. */
GObject *
seafile_move_file (const char *src_repo_id,
                   const char *src_dir,
                   const char *src_filename,
                   const char *dst_repo_id,
                   const char *dst_dir,
                   const char *dst_filename,
                   int replace,
                   const char *user,
                   int need_progress,
                   int synchronous,
                   GError **error)
{
    char *norm_src_dir = NULL, *norm_src_filename = NULL;
    char *norm_dst_dir = NULL, *norm_dst_filename = NULL;
    char *rsrc_dir = NULL, *rdst_dir = NULL;
    GObject *ret = NULL;

    if (!src_repo_id || !src_dir || !src_filename ||
        !dst_repo_id || !dst_dir || !dst_filename || !user) {
        /* Use SEAFILE_DOMAIN instead of 0; a 0 error domain is invalid
         * for GError and inconsistent with the rest of this file. */
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Argument should not be null");
        return NULL;
    }
    if (!is_uuid_valid (src_repo_id) || !is_uuid_valid(dst_repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    norm_src_dir = normalize_utf8_path (src_dir);
    if (!norm_src_dir) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        goto out;
    }
    norm_src_filename = normalize_utf8_path (src_filename);
    if (!norm_src_filename) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        goto out;
    }
    norm_dst_dir = normalize_utf8_path (dst_dir);
    if (!norm_dst_dir) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        goto out;
    }
    norm_dst_filename = normalize_utf8_path (dst_filename);
    if (!norm_dst_filename) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        goto out;
    }

    rsrc_dir = format_dir_path (norm_src_dir);
    rdst_dir = format_dir_path (norm_dst_dir);

    ret = (GObject *)seaf_repo_manager_move_multiple_files (seaf->repo_mgr,
                                                            src_repo_id, rsrc_dir, norm_src_filename,
                                                            dst_repo_id, rdst_dir, norm_dst_filename,
                                                            replace, user, need_progress, synchronous,
                                                            error);

out:
    g_free (norm_src_dir);
    g_free (norm_src_filename);
    g_free (norm_dst_dir);
    g_free (norm_dst_filename);
    g_free (rsrc_dir);
    g_free (rdst_dir);
    return ret;
}
/* Look up a background copy/move task by its id. */
GObject *
seafile_get_copy_task (const char *task_id, GError **error)
{
    GObject *task = (GObject *)seaf_copy_manager_get_task (seaf->copy_mgr,
                                                           task_id);
    return task;
}
/* Cancel a background copy/move task by its id. */
int
seafile_cancel_copy_task (const char *task_id, GError **error)
{
    int rc = seaf_copy_manager_cancel_task (seaf->copy_mgr, task_id);
    return rc;
}
/* Rename parent_dir/oldname to parent_dir/newname.
 * Returns 0 on success, -1 on failure with *error set. */
int
seafile_rename_file (const char *repo_id,
                     const char *parent_dir,
                     const char *oldname,
                     const char *newname,
                     const char *user,
                     GError **error)
{
    char *norm_parent_dir = NULL, *norm_oldname = NULL, *norm_newname = NULL;
    char *rpath = NULL;
    int ret = 0;

    if (!repo_id || !parent_dir || !oldname || !newname || !user) {
        /* Use SEAFILE_DOMAIN instead of 0; a 0 error domain is invalid
         * for GError and inconsistent with the rest of this file. */
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Argument should not be null");
        return -1;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    norm_parent_dir = normalize_utf8_path (parent_dir);
    if (!norm_parent_dir) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        ret = -1;
        goto out;
    }
    norm_oldname = normalize_utf8_path (oldname);
    if (!norm_oldname) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        ret = -1;
        goto out;
    }
    norm_newname = normalize_utf8_path (newname);
    if (!norm_newname) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Path is in valid UTF8 encoding");
        ret = -1;
        goto out;
    }

    rpath = format_dir_path (norm_parent_dir);
    if (seaf_repo_manager_rename_file (seaf->repo_mgr, repo_id,
                                       rpath, norm_oldname, norm_newname,
                                       user, error) < 0) {
        ret = -1;
    }

out:
    g_free (norm_parent_dir);
    g_free (norm_oldname);
    g_free (norm_newname);
    g_free (rpath);
    return ret;
}
/* Check whether a file name is acceptable in the repo.
 * Returns the manager's verdict, or -1 on bad arguments. */
int
seafile_is_valid_filename (const char *repo_id,
                           const char *filename,
                           GError **error)
{
    if (!repo_id || !filename) {
        /* Use SEAFILE_DOMAIN instead of 0; a 0 error domain is invalid
         * for GError and inconsistent with the rest of this file. */
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Argument should not be null");
        return -1;
    }

    int ret = seaf_repo_manager_is_valid_filename (seaf->repo_mgr,
                                                   repo_id,
                                                   filename,
                                                   error);
    return ret;
}
/* Create a new repo (optionally encrypted with passwd).
 * Returns the new repo id (caller frees), or NULL on error. */
char *
seafile_create_repo (const char *repo_name,
                     const char *repo_desc,
                     const char *owner_email,
                     const char *passwd,
                     int enc_version,
                     const char *pwd_hash_algo,
                     const char *pwd_hash_params,
                     GError **error)
{
    if (!repo_name || !repo_desc || !owner_email) {
        /* Use SEAFILE_DOMAIN instead of 0; a 0 error domain is invalid
         * for GError and inconsistent with the rest of this file. */
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Argument should not be null");
        return NULL;
    }

    char *repo_id;
    repo_id = seaf_repo_manager_create_new_repo (seaf->repo_mgr,
                                                 repo_name, repo_desc,
                                                 owner_email,
                                                 passwd,
                                                 enc_version,
                                                 pwd_hash_algo,
                                                 pwd_hash_params,
                                                 error);
    return repo_id;
}
/* Create an encrypted repo with client-supplied id and key material.
 * Returns the repo id (caller frees), or NULL on error. */
char *
seafile_create_enc_repo (const char *repo_id,
                         const char *repo_name,
                         const char *repo_desc,
                         const char *owner_email,
                         const char *magic,
                         const char *random_key,
                         const char *salt,
                         int enc_version,
                         const char *pwd_hash,
                         const char *pwd_hash_algo,
                         const char *pwd_hash_params,
                         GError **error)
{
    if (!repo_id || !repo_name || !repo_desc || !owner_email) {
        /* Use SEAFILE_DOMAIN instead of 0; a 0 error domain is invalid
         * for GError and inconsistent with the rest of this file. */
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Argument should not be null");
        return NULL;
    }

    char *ret;
    ret = seaf_repo_manager_create_enc_repo (seaf->repo_mgr,
                                             repo_id, repo_name, repo_desc,
                                             owner_email,
                                             magic, random_key, salt,
                                             enc_version,
                                             pwd_hash, pwd_hash_algo, pwd_hash_params,
                                             error);
    return ret;
}
/* Set a user's storage quota in bytes.  Returns -1 on error. */
int
seafile_set_user_quota (const char *user, gint64 quota, GError **error)
{
    if (!user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return -1;
    }

    return seaf_quota_manager_set_user_quota (seaf->quota_mgr, user, quota);
}
/* Return a user's storage quota in bytes, or -1 on error. */
gint64
seafile_get_user_quota (const char *user, GError **error)
{
    if (!user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return -1;
    }

    return seaf_quota_manager_get_user_quota (seaf->quota_mgr, user);
}
/* Check whether adding `delta` bytes to the repo would exceed its
 * owner's quota.  Returns -1 when the quota would be exceeded (the
 * manager reports that as 1) or on bad arguments; otherwise the
 * manager's return value. */
int
seafile_check_quota (const char *repo_id, gint64 delta, GError **error)
{
    if (!repo_id) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments");
        return -1;
    }

    int rc = seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr,
                                                        repo_id, delta);
    return (rc == 1) ? -1 : rc;
}
/* List per-user quota usage records. */
GList *
seafile_list_user_quota_usage (GError **error)
{
    GList *usage = seaf_repo_quota_manager_list_user_quota_usage (seaf->quota_mgr);
    return usage;
}
/*
 * Resolve `path` in the head commit of `repo_id` to an object id.
 * `want_dir` selects whether the caller expects a directory (TRUE) or a
 * regular file (FALSE); a mismatch yields NULL.  Returns a newly
 * allocated id string (caller frees), or NULL on error / mismatch.
 */
static char *
get_obj_id_by_path (const char *repo_id,
                    const char *path,
                    gboolean want_dir,
                    GError **error)
{
    SeafRepo *repo = NULL;
    SeafCommit *commit = NULL;
    char *obj_id = NULL;

    if (!repo_id || !path) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return NULL;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL,
                     "Get repo error");
        goto out;
    }

    /* Resolve against the repo's current head commit. */
    commit = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                             repo->id, repo->version,
                                             repo->head->commit_id);
    if (!commit) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL,
                     "Get commit error");
        goto out;
    }

    /* `mode` receives the entry's st_mode-style bits for the type check
     * below. */
    guint32 mode = 0;
    obj_id = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,
                                             repo->store_id, repo->version,
                                             commit->root_id,
                                             path, &mode, error);

out:
    if (repo)
        seaf_repo_unref (repo);
    if (commit)
        seaf_commit_unref (commit);

    if (obj_id) {
        /* check if the mode matches */
        if ((want_dir && !S_ISDIR(mode)) || ((!want_dir) && S_ISDIR(mode))) {
            g_free (obj_id);
            return NULL;
        }
    }

    return obj_id;
}
/* Return the file id at `path` in the repo's head commit, or NULL if the
 * path is missing or names a directory.  Caller frees the result. */
char *seafile_get_file_id_by_path (const char *repo_id,
                                   const char *path,
                                   GError **error)
{
    char *canon_path, *file_id;

    if (!repo_id || !path) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return NULL;
    }

    canon_path = format_dir_path (path);
    file_id = get_obj_id_by_path (repo_id, canon_path, FALSE, error);
    g_free (canon_path);

    /* Downgrade "not found" style errors for the RPC layer. */
    filter_error (error);
    return file_id;
}
/* Return the directory id at `path` in the repo's head commit, or NULL
 * if the path is missing or names a file.  Caller frees the result. */
char *seafile_get_dir_id_by_path (const char *repo_id,
                                  const char *path,
                                  GError **error)
{
    char *canon_path, *dir_id;

    if (!repo_id || !path) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return NULL;
    }

    canon_path = format_dir_path (path);
    dir_id = get_obj_id_by_path (repo_id, canon_path, TRUE, error);
    g_free (canon_path);

    /* Downgrade "not found" style errors for the RPC layer. */
    filter_error (error);
    return dir_id;
}
/*
 * Return a SeafileDirent object describing the entry at `path` in the
 * repo's head commit, or NULL with *error set.  The repo root "/" itself
 * is rejected as it has no dirent.
 */
GObject *
seafile_get_dirent_by_path (const char *repo_id, const char *path,
                            GError **error)
{
    if (!repo_id || !path) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "invalid repo id");
        return NULL;
    }

    char *rpath = format_dir_path (path);
    /* The root directory has no parent dirent to describe. */
    if (strcmp (rpath, "/") == 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "invalid path");
        g_free (rpath);
        return NULL;
    }

    SeafRepo *repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL,
                     "Get repo error");
        return NULL;
    }

    /* Look the path up in the current head commit. */
    SeafCommit *commit = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                                         repo->id, repo->version,
                                                         repo->head->commit_id);
    if (!commit) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL,
                     "Get commit error");
        seaf_repo_unref (repo);
        return NULL;
    }

    SeafDirent *dirent = seaf_fs_manager_get_dirent_by_path (seaf->fs_mgr,
                                                             repo->store_id, repo->version,
                                                             commit->root_id, rpath,
                                                             error);
    g_free (rpath);
    if (!dirent) {
        filter_error (error);
        seaf_repo_unref (repo);
        seaf_commit_unref (commit);
        return NULL;
    }

    /* Convert the internal dirent into the RPC object type. */
    GObject *obj = g_object_new (SEAFILE_TYPE_DIRENT,
                                 "obj_id", dirent->id,
                                 "obj_name", dirent->name,
                                 "mode", dirent->mode,
                                 "version", dirent->version,
                                 "mtime", dirent->mtime,
                                 "size", dirent->size,
                                 "modifier", dirent->modifier,
                                 NULL);

    seaf_repo_unref (repo);
    seaf_commit_unref (commit);
    seaf_dirent_free (dirent);
    return obj;
}
/* List the block ids of a file, one per line, honoring offset/limit
 * paging.  Returns a newly allocated string (caller frees), or NULL on
 * error with *error set. */
char *
seafile_list_file_blocks (const char *repo_id,
                          const char *file_id,
                          int offset, int limit,
                          GError **error)
{
    SeafRepo *repo;
    Seafile *file;
    GString *buf;
    int index = 0;

    if (!repo_id || !is_uuid_valid(repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_DIR_ID, "Bad repo id");
        return NULL;
    }
    if (!file_id || !is_object_id_valid(file_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_DIR_ID, "Bad file id");
        return NULL;
    }

    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad repo id");
        return NULL;
    }

    file = seaf_fs_manager_get_seafile (seaf->fs_mgr,
                                        repo->store_id,
                                        repo->version, file_id);
    if (!file) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_DIR_ID, "Bad file id");
        seaf_repo_unref (repo);
        return NULL;
    }

    if (offset < 0)
        offset = 0;

    /* Allocate the buffer only after all validations succeed: the previous
     * version created it up front and leaked it on every error return. */
    buf = g_string_new ("");
    for (index = 0; index < file->n_blocks; index++) {
        if (index < offset) {
            continue;
        }
        if (limit > 0) {
            if (index >= offset + limit)
                break;
        }
        g_string_append_printf (buf, "%s\n", file->blk_sha1s[index]);
    }

    seafile_unref (file);
    seaf_repo_unref (repo);

    return g_string_free (buf, FALSE);
}
/*
 * Directories are always before files. Otherwise compare the names.
 */
static gint
comp_dirent_func (gconstpointer a, gconstpointer b)
{
    const SeafDirent *x = a;
    const SeafDirent *y = b;

    /* A directory sorts ahead of a regular file. */
    if (S_ISDIR (x->mode) && S_ISREG (y->mode))
        return -1;
    if (S_ISREG (x->mode) && S_ISDIR (y->mode))
        return 1;

    /* Same kind: order case-insensitively by name. */
    return strcasecmp (x->name, y->name);
}
/*
 * List the entries of a directory object as SeafileDirent objects,
 * sorted directories-first then case-insensitively by name, honoring
 * offset/limit paging.  Returns NULL with *error set on failure.
 */
GList *
seafile_list_dir (const char *repo_id,
                  const char *dir_id, int offset, int limit, GError **error)
{
    SeafRepo *repo;
    SeafDir *dir;
    SeafDirent *dent;
    SeafileDirent *d;
    GList *res = NULL;
    GList *p;

    if (!repo_id || !is_uuid_valid(repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_DIR_ID, "Bad repo id");
        return NULL;
    }
    if (!dir_id || !is_object_id_valid (dir_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_DIR_ID, "Bad dir id");
        return NULL;
    }

    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad repo id");
        return NULL;
    }

    dir = seaf_fs_manager_get_seafdir (seaf->fs_mgr,
                                       repo->store_id, repo->version, dir_id);
    if (!dir) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_DIR_ID, "Bad dir id");
        seaf_repo_unref (repo);
        return NULL;
    }

    /* Sort dirs before files, then by name (see comp_dirent_func). */
    dir->entries = g_list_sort (dir->entries, comp_dirent_func);

    if (offset < 0) {
        offset = 0;
    }

    int index = 0;
    for (p = dir->entries; p != NULL; p = p->next, index++) {
        /* Skip entries before the requested window; stop after it. */
        if (index < offset) {
            continue;
        }

        if (limit > 0) {
            if (index >= offset + limit)
                break;
        }

        dent = p->data;

        /* Skip entries whose object id is corrupt. */
        if (!is_object_id_valid (dent->id))
            continue;

        d = g_object_new (SEAFILE_TYPE_DIRENT,
                          "obj_id", dent->id,
                          "obj_name", dent->name,
                          "mode", dent->mode,
                          "version", dent->version,
                          "mtime", dent->mtime,
                          "size", dent->size,
                          "permission", "",
                          NULL);
        res = g_list_prepend (res, d);
    }

    seaf_dir_free (dir);
    seaf_repo_unref (repo);

    /* Entries were prepended; restore original (sorted) order. */
    res = g_list_reverse (res);
    return res;
}
/* List the revision history of a file path, starting from commit_id,
 * up to `limit` entries.  Returns NULL on error. */
GList *
seafile_list_file_revisions (const char *repo_id,
                             const char *commit_id,
                             const char *path,
                             int limit,
                             GError **error)
{
    GList *revisions;
    char *canon_path;

    if (!repo_id || !path) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return NULL;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    canon_path = format_dir_path (path);
    revisions = seaf_repo_manager_list_file_revisions (seaf->repo_mgr,
                                                       repo_id, commit_id,
                                                       canon_path, limit,
                                                       FALSE, FALSE, error);
    g_free (canon_path);
    return revisions;
}
/* Compute last-modified information for entries under parent_dir,
 * examining at most `limit` commits.  Returns NULL on error. */
GList *
seafile_calc_files_last_modified (const char *repo_id,
                                  const char *parent_dir,
                                  int limit,
                                  GError **error)
{
    GList *result;
    char *canon_dir;

    if (!repo_id || !parent_dir) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return NULL;
    }
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    canon_dir = format_dir_path (parent_dir);
    result = seaf_repo_manager_calc_files_last_modified (seaf->repo_mgr,
                                                         repo_id, canon_dir,
                                                         limit, error);
    g_free (canon_dir);
    return result;
}
/* RPC: revert a file to its state in a given commit.
 * Validates the arguments, canonicalizes the path and delegates to
 * seaf_repo_manager_revert_file(). Returns -1 with `error` set on
 * bad arguments. */
int
seafile_revert_file (const char *repo_id,
                     const char *commit_id,
                     const char *path,
                     const char *user,
                     GError **error)
{
    char *canon_path;
    int status;

    /* Every argument is mandatory for a revert. */
    if (repo_id == NULL || commit_id == NULL || path == NULL || user == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return -1;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    if (!is_object_id_valid (commit_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid commit id");
        return -1;
    }

    canon_path = format_dir_path (path);
    status = seaf_repo_manager_revert_file (seaf->repo_mgr,
                                            repo_id, commit_id,
                                            canon_path, user, error);
    g_free (canon_path);

    return status;
}
/* RPC: revert a directory to its state in a given commit.
 * Mirrors seafile_revert_file(): validates arguments, canonicalizes the
 * path and delegates to seaf_repo_manager_revert_dir(). Returns -1 with
 * `error` set on bad arguments. */
int
seafile_revert_dir (const char *repo_id,
                    const char *commit_id,
                    const char *path,
                    const char *user,
                    GError **error)
{
    if (!repo_id || !commit_id || !path || !user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return -1;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    if (!is_object_id_valid (commit_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid commit id");
        return -1;
    }

    /* Normalize the client-supplied path before passing it on. */
    char *rpath = format_dir_path (path);

    int ret = seaf_repo_manager_revert_dir (seaf->repo_mgr,
                                            repo_id, commit_id,
                                            rpath, user, error);
    g_free (rpath);

    return ret;
}
/* RPC: given a JSON array of block ids, return a JSON-encoded array of the
 * ids whose blocks do not exist in the repo's block store.
 * Returns a newly allocated string (caller frees with g_free), or NULL
 * with `error` set on bad arguments or unknown repo. */
char *
seafile_check_repo_blocks_missing (const char *repo_id,
                                   const char *blockids_json,
                                   GError **error)
{
    json_t *array, *value, *ret_json;
    json_error_t err;
    size_t index;
    char *json_data, *ret;
    SeafRepo *repo = NULL;

    /* Fix: the original called strlen() on blockids_json and used repo_id
     * without any validation, so a NULL argument crashed the RPC handler. */
    if (!repo_id || !is_uuid_valid (repo_id) || !blockids_json) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return NULL;
    }

    array = json_loadb (blockids_json, strlen(blockids_json), 0, &err);
    if (!array) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return NULL;
    }

    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        seaf_warning ("Failed to get repo %.8s.\n", repo_id);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Repo not found");
        json_decref (array);
        return NULL;
    }

    ret_json = json_array();

    /* Collect every requested id that has no block in the store;
     * non-string array entries are silently skipped. */
    size_t n = json_array_size (array);
    for (index = 0; index < n; index++) {
        value = json_array_get (array, index);
        const char *blockid = json_string_value (value);
        if (!blockid)
            continue;
        if (!seaf_block_manager_block_exists(seaf->block_mgr, repo_id,
                                             repo->version, blockid)) {
            json_array_append_new (ret_json, json_string(blockid));
        }
    }

    json_data = json_dumps (ret_json, 0);
    ret = g_strdup (json_data);
    free (json_data); /* json_dumps() allocates with malloc, not g_malloc */

    json_decref (ret_json);
    json_decref (array);
    seaf_repo_unref (repo);

    return ret;
}
/* RPC: list entries deleted from the repo within `show_days`.
 * `path` and `scan_stat` are optional; `path` is canonicalized when given.
 * Delegates to seaf_repo_manager_get_deleted_entries(). Returns NULL with
 * `error` set on bad arguments. */
GList *
seafile_get_deleted (const char *repo_id, int show_days,
                     const char *path, const char *scan_stat,
                     int limit, GError **error)
{
    if (!repo_id) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    /* path is optional: NULL means "the whole repo". */
    char *rpath = NULL;
    if (path)
        rpath = format_dir_path (path);

    GList *ret = seaf_repo_manager_get_deleted_entries (seaf->repo_mgr,
                                                        repo_id, show_days,
                                                        rpath, scan_stat,
                                                        limit, error);
    g_free (rpath);

    return ret;
}
/* RPC: generate a sync token for (repo_id, email).
 * Delegates to seaf_repo_manager_generate_repo_token(). Returns NULL with
 * `error` set on bad arguments. */
char *
seafile_generate_repo_token (const char *repo_id,
                             const char *email,
                             GError **error)
{
    char *token;

    if (!repo_id || !email) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    token = seaf_repo_manager_generate_repo_token (seaf->repo_mgr, repo_id, email, error);

    return token;
}
/* RPC: delete a specific sync token of a repo on behalf of `user`.
 * Delegates to seaf_repo_manager_delete_token(). Returns -1 with `error`
 * set on bad arguments. */
int
seafile_delete_repo_token (const char *repo_id,
                           const char *token,
                           const char *user,
                           GError **error)
{
    if (!repo_id || !token || !user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty");
        return -1;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    return seaf_repo_manager_delete_token (seaf->repo_mgr,
                                           repo_id, token, user, error);
}

/* RPC: list all sync tokens issued for a repo.
 * Delegates to seaf_repo_manager_list_repo_tokens(). Returns NULL with
 * `error` set on bad arguments. */
GList *
seafile_list_repo_tokens (const char *repo_id,
                          GError **error)
{
    GList *ret_list;

    if (!repo_id) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    ret_list = seaf_repo_manager_list_repo_tokens (seaf->repo_mgr, repo_id, error);

    return ret_list;
}

/* RPC: list all sync tokens issued to a given user (email).
 * Delegates to seaf_repo_manager_list_repo_tokens_by_email(). */
GList *
seafile_list_repo_tokens_by_email (const char *email,
                                   GError **error)
{
    GList *ret_list;

    if (!email) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty");
        return NULL;
    }

    ret_list = seaf_repo_manager_list_repo_tokens_by_email (seaf->repo_mgr, email, error);

    return ret_list;
}
/* RPC: delete all sync tokens a user registered under a given peer id,
 * then invalidate those tokens in the HTTP server's cache (when built
 * with evhtp). Returns 0 on success, -1 with `error` set otherwise. */
int
seafile_delete_repo_tokens_by_peer_id(const char *email,
                                      const char *peer_id,
                                      GError **error)
{
    static const char valid_chars[] = "0123456789abcdefghijklmnopqrstuvwxyz";
    GList *tokens = NULL;

    if (email == NULL || peer_id == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty");
        return -1;
    }

    /* A peer id must be exactly 40 characters drawn from [0-9a-z]. */
    if (strlen (peer_id) != 40 || strspn (peer_id, valid_chars) != 40) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "invalid peer id");
        return -1;
    }

    if (seaf_repo_manager_delete_repo_tokens_by_peer_id (seaf->repo_mgr, email, peer_id, &tokens, error) < 0) {
        g_list_free_full (tokens, (GDestroyNotify)g_free);
        return -1;
    }

#ifdef HAVE_EVHTP
    seaf_http_server_invalidate_tokens(seaf->http_server, tokens);
#endif

    g_list_free_full (tokens, (GDestroyNotify)g_free);
    return 0;
}
/* RPC: delete all sync tokens issued to a user (email).
 * Delegates to seaf_repo_manager_delete_repo_tokens_by_email(). */
int
seafile_delete_repo_tokens_by_email (const char *email,
                                     GError **error)
{
    if (!email) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty");
        return -1;
    }

    return seaf_repo_manager_delete_repo_tokens_by_email (seaf->repo_mgr, email, error);
}
/* RPC: look up `user`'s access permission on `repo_id`.
 * Delegates to seaf_repo_manager_check_permission(). Returns a newly
 * allocated permission string, or NULL with `error` set on bad arguments. */
char *
seafile_check_permission (const char *repo_id, const char *user, GError **error)
{
    if (!repo_id || !user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    /* Fix: an empty user name used to return NULL without setting `error`,
     * which callers could not distinguish from "no permission". */
    if (strlen(user) == 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Arguments should not be empty");
        return NULL;
    }

    return seaf_repo_manager_check_permission (seaf->repo_mgr,
                                               repo_id, user, error);
}
/* RPC: path-aware permission check.
 * The `path` argument is intentionally ignored here; this simply forwards
 * to the repo-level seafile_check_permission(). */
char *
seafile_check_permission_by_path (const char *repo_id, const char *path,
                                  const char *user, GError **error)
{
    return seafile_check_permission (repo_id, user, error);
}
/* RPC: list the entries of a directory together with per-entry permission
 * info for `user`, with offset/limit paging. Validates the arguments,
 * canonicalizes the path and delegates to
 * seaf_repo_manager_list_dir_with_perm(). Returns NULL with `error` set
 * on bad arguments. */
GList *
seafile_list_dir_with_perm (const char *repo_id,
                            const char *path,
                            const char *dir_id,
                            const char *user,
                            int offset,
                            int limit,
                            GError **error)
{
    if (!repo_id || !is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    if (!path) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid path");
        return NULL;
    }

    if (!dir_id || !is_object_id_valid (dir_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid dir id");
        return NULL;
    }

    if (!user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid user");
        return NULL;
    }

    /* Normalize the client-supplied path before passing it on. */
    char *rpath = format_dir_path (path);

    GList *ret = seaf_repo_manager_list_dir_with_perm (seaf->repo_mgr,
                                                       repo_id,
                                                       rpath,
                                                       dir_id,
                                                       user,
                                                       offset,
                                                       limit,
                                                       error);
    g_free (rpath);

    return ret;
}
/* RPC: change the permission of an existing user-to-user share.
 * Validates the arguments and delegates to
 * seaf_share_manager_set_permission(). Returns -1 with `error` set on
 * bad arguments. */
int
seafile_set_share_permission (const char *repo_id,
                              const char *from_email,
                              const char *to_email,
                              const char *permission,
                              GError **error)
{
    if (!repo_id || !from_email || !to_email || !permission) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty");
        return -1;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid repo_id parameter");
        return -1;
    }

    /* Only a fixed set of permission strings is accepted. */
    if (!is_permission_valid (permission)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid permission parameter");
        return -1;
    }

    return seaf_share_manager_set_permission (seaf->share_mgr,
                                              repo_id,
                                              from_email,
                                              to_email,
                                              permission);
}

/* RPC: change the permission of an existing group share.
 * Validates the arguments and delegates to
 * seaf_repo_manager_set_group_repo_perm(). Returns -1 with `error` set
 * on bad arguments. */
int
seafile_set_group_repo_permission (int group_id,
                                   const char *repo_id,
                                   const char *permission,
                                   GError **error)
{
    if (!repo_id || !permission) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty");
        return -1;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    if (!is_permission_valid (permission)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid permission parameter");
        return -1;
    }

    return seaf_repo_manager_set_group_repo_perm (seaf->repo_mgr,
                                                  repo_id,
                                                  group_id,
                                                  permission,
                                                  error);
}
/* RPC: resolve `path` inside a specific commit to a file object id.
 * Returns a newly allocated id string, or NULL if the path does not exist,
 * resolves to a directory, or the arguments are invalid (with `error` set). */
char *
seafile_get_file_id_by_commit_and_path(const char *repo_id,
                                       const char *commit_id,
                                       const char *path,
                                       GError **error)
{
    SeafRepo *repo;
    SeafCommit *commit;
    char *file_id;
    guint32 mode;

    if (!repo_id || !is_uuid_valid(repo_id) || !commit_id || !path) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Arguments should not be empty");
        return NULL;
    }

    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad repo id");
        return NULL;
    }

    commit = seaf_commit_manager_get_commit(seaf->commit_mgr,
                                            repo_id,
                                            repo->version,
                                            commit_id);
    if (!commit) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "bad commit id");
        seaf_repo_unref (repo);
        return NULL;
    }

    char *rpath = format_dir_path (path);

    file_id = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,
                                              repo->store_id, repo->version,
                                              commit->root_id, rpath, &mode, error);
    /* Only files count here: if the path resolved to a directory,
     * discard the id and return NULL. */
    if (file_id && S_ISDIR(mode)) {
        g_free (file_id);
        file_id = NULL;
    }

    g_free (rpath);
    /* Translate/clear internal lookup errors before returning to the caller. */
    filter_error (error);

    seaf_commit_unref(commit);
    seaf_repo_unref (repo);

    return file_id;
}
/* Virtual repo related */

/* RPC: create a virtual repo rooted at `path` inside `origin_repo_id`.
 * `passwd` may be NULL for unencrypted repos (passed through unchecked).
 * Delegates to seaf_repo_manager_create_virtual_repo() and returns the
 * new repo id, or NULL with `error` set on bad arguments. */
char *
seafile_create_virtual_repo (const char *origin_repo_id,
                             const char *path,
                             const char *repo_name,
                             const char *repo_desc,
                             const char *owner,
                             const char *passwd,
                             GError **error)
{
    if (!origin_repo_id || !path ||!repo_name || !repo_desc || !owner) {
        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return NULL;
    }

    if (!is_uuid_valid (origin_repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    char *repo_id;
    char *rpath = format_dir_path (path);

    repo_id = seaf_repo_manager_create_virtual_repo (seaf->repo_mgr,
                                                     origin_repo_id, rpath,
                                                     repo_name, repo_desc,
                                                     owner, passwd, error);
    g_free (rpath);

    return repo_id;
}
/* RPC: list the virtual repos owned by `owner`, decorated with information
 * about each repo's origin: whether the caller also owns the origin repo,
 * the origin repo's name, and the caller's permission on the virtual repo.
 * Virtual repos whose origin repo cannot be loaded are skipped with a
 * warning. Returns a list of SeafileRepo objects in manager order. */
GList *
seafile_get_virtual_repos_by_owner (const char *owner, GError **error)
{
    GList *repos, *ret = NULL, *ptr;
    SeafRepo *r, *o;
    SeafileRepo *repo;
    char *orig_repo_id;
    gboolean is_original_owner;

    if (!owner) {
        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return NULL;
    }

    repos = seaf_repo_manager_get_virtual_repos_by_owner (seaf->repo_mgr,
                                                          owner,
                                                          error);
    for (ptr = repos; ptr != NULL; ptr = ptr->next) {
        r = ptr->data;

        orig_repo_id = r->virtual_info->origin_repo_id;
        o = seaf_repo_manager_get_repo (seaf->repo_mgr, orig_repo_id);
        if (!o) {
            /* Origin repo is gone/unloadable; skip this virtual repo. */
            seaf_warning ("Failed to get origin repo %.10s.\n", orig_repo_id);
            seaf_repo_unref (r);
            continue;
        }

        char *orig_owner = seaf_repo_manager_get_repo_owner (seaf->repo_mgr,
                                                             orig_repo_id);
        if (g_strcmp0 (orig_owner, owner) == 0)
            is_original_owner = TRUE;
        else
            is_original_owner = FALSE;
        g_free (orig_owner);

        /* Permission lookup errors are deliberately ignored (NULL GError). */
        char *perm = seaf_repo_manager_check_permission (seaf->repo_mgr,
                                                         r->id, owner, NULL);

        repo = (SeafileRepo *)convert_repo (r);
        if (repo) {
            g_object_set (repo, "is_original_owner", is_original_owner,
                          "origin_repo_name", o->name,
                          "virtual_perm", perm, NULL);
            ret = g_list_prepend (ret, repo);
        }

        seaf_repo_unref (r);
        seaf_repo_unref (o);
        g_free (perm);
    }
    g_list_free (repos);

    return g_list_reverse (ret);
}
/* RPC: look up the virtual repo created from (origin_repo, path, owner)
 * and return it as a SeafileRepo object, or NULL if none exists or the
 * arguments are invalid (with `error` set). */
GObject *
seafile_get_virtual_repo (const char *origin_repo,
                          const char *path,
                          const char *owner,
                          GError **error)
{
    char *repo_id;
    GObject *repo_obj;
    char *rpath;

    /* Fix: format_dir_path() dereferences its argument, so the original
     * crashed on NULL arguments instead of reporting a bad-argument error. */
    if (!origin_repo || !path || !owner) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Argument should not be null");
        return NULL;
    }

    rpath = format_dir_path (path);

    repo_id = seaf_repo_manager_get_virtual_repo_id (seaf->repo_mgr,
                                                     origin_repo,
                                                     rpath,
                                                     owner);
    g_free (rpath);

    if (!repo_id)
        return NULL;

    repo_obj = seafile_get_repo (repo_id, error);

    g_free (repo_id);

    return repo_obj;
}
/* System default library */

/* RPC: return the id of the system default ("template") library. */
char *
seafile_get_system_default_repo_id (GError **error)
{
    return get_system_default_repo_id(seaf);
}
/* Advance a repo's valid-since timestamp, never moving it backwards.
 * new_time > 0: use it directly if newer than the stored value.
 * new_time == 0: no history is kept, so only the head commit remains
 * valid; use the head commit's ctime.
 * Returns the manager's result (0 on success, negative on DB error). */
static int
update_valid_since_time (SeafRepo *repo, gint64 new_time)
{
    int ret = 0;
    gint64 old_time = seaf_repo_manager_get_repo_valid_since (repo->manager,
                                                              repo->id);

    if (new_time > 0) {
        if (new_time > old_time)
            ret = seaf_repo_manager_set_repo_valid_since (repo->manager,
                                                          repo->id,
                                                          new_time);
    } else if (new_time == 0) {
        /* Only the head commit is valid after GC if no history is kept. */
        SeafCommit *head = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                                           repo->id, repo->version,
                                                           repo->head->commit_id);
        /* Fix: the original unconditionally called seaf_commit_unref(head),
         * passing NULL when the head commit could not be loaded. */
        if (head) {
            if (old_time < 0 || head->ctime > (guint64)old_time)
                ret = seaf_repo_manager_set_repo_valid_since (repo->manager,
                                                              repo->id,
                                                              head->ctime);
            seaf_commit_unref (head);
        }
    }

    return ret;
}
/* Clean up a repo's history.
 * It just sets the valid-since time but does not actually delete the data
 * (actual removal happens later, e.g. during GC).
 * keep_days > 0 keeps that many days of history; otherwise all history
 * before the current head is invalidated. */
int
seafile_clean_up_repo_history (const char *repo_id, int keep_days, GError **error)
{
    SeafRepo *repo;
    int ret;

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid arguments");
        return -1;
    }

    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        seaf_warning ("Cannot find repo %s.\n", repo_id);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid arguments");
        return -1;
    }

    gint64 truncate_time, now;
    if (keep_days > 0) {
        now = (gint64)time(NULL);
        /* Fix: the original computed keep_days * 24 * 3600 in int, which
         * overflows for keep_days > ~24855; force 64-bit arithmetic. */
        truncate_time = now - (gint64)keep_days * 24 * 3600;
    } else
        truncate_time = 0;

    ret = update_valid_since_time (repo, truncate_time);
    if (ret < 0) {
        seaf_warning ("Failed to update valid since time for repo %.8s.\n", repo->id);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, "Database error");
    }

    seaf_repo_unref (repo);
    return ret;
}
/* RPC: list the users a sub-directory of a repo has been shared to by
 * `from_user`. Canonicalizes the path and delegates to
 * seaf_repo_manager_get_shared_users_for_subdir(). Returns NULL with
 * `error` set on bad arguments. */
GList *
seafile_get_shared_users_for_subdir (const char *repo_id,
                                     const char *path,
                                     const char *from_user,
                                     GError **error)
{
    if (!repo_id || !path || !from_user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo_id");
        return NULL;
    }

    char *rpath = format_dir_path (path);
    GList *ret = seaf_repo_manager_get_shared_users_for_subdir (seaf->repo_mgr,
                                                                repo_id, rpath,
                                                                from_user, error);
    g_free (rpath);

    return ret;
}

/* RPC: list the groups a sub-directory of a repo has been shared to by
 * `from_user`. Mirrors seafile_get_shared_users_for_subdir(). */
GList *
seafile_get_shared_groups_for_subdir (const char *repo_id,
                                      const char *path,
                                      const char *from_user,
                                      GError **error)
{
    if (!repo_id || !path || !from_user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo_id");
        return NULL;
    }

    char *rpath = format_dir_path (path);
    GList *ret = seaf_repo_manager_get_shared_groups_for_subdir (seaf->repo_mgr,
                                                                 repo_id, rpath,
                                                                 from_user, error);
    g_free (rpath);

    return ret;
}
/* RPC: total number of files across the server (delegated). */
gint64
seafile_get_total_file_number (GError **error)
{
    return seaf_get_total_file_number (error);
}

/* RPC: total storage used across the server (delegated). */
gint64
seafile_get_total_storage (GError **error)
{
    return seaf_get_total_storage (error);
}
/* RPC: return file-count information for a path inside a repo.
 * NOTE(review): unlike most path-taking RPCs in this file, `path` is
 * passed to the fs manager without format_dir_path() normalization —
 * confirm whether that is intentional. */
GObject *
seafile_get_file_count_info_by_path (const char *repo_id,
                                     const char *path,
                                     GError **error)
{
    if (!repo_id || !path) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return NULL;
    }

    GObject *ret = NULL;
    SeafRepo *repo = NULL;
    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        seaf_warning ("Failed to get repo %.10s\n", repo_id);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Library not exists");
        return NULL;
    }

    ret = seaf_fs_manager_get_file_count_info_by_path (seaf->fs_mgr,
                                                       repo->store_id,
                                                       repo->version,
                                                       repo->root_id,
                                                       path, error);
    seaf_repo_unref (repo);

    return ret;
}
/* RPC: return the owner of a repo that sits in the trash (delegated). */
char *
seafile_get_trash_repo_owner (const char *repo_id, GError **error)
{
    if (!repo_id) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return NULL;
    }

    return seaf_get_trash_repo_owner (repo_id);
}
/* RPC: create a directory (and any missing parents) inside a repo.
 * Validates the arguments and delegates to
 * seaf_repo_manager_mkdir_with_parents(). Returns 0 on success, -1 with
 * `error` set otherwise. */
int
seafile_mkdir_with_parents (const char *repo_id, const char *parent_dir,
                            const char *new_dir_path, const char *user,
                            GError **error)
{
    if (repo_id == NULL || parent_dir == NULL ||
        new_dir_path == NULL || user == NULL) {
        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return -1;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    int rc = seaf_repo_manager_mkdir_with_parents (seaf->repo_mgr, repo_id,
                                                   parent_dir, new_dir_path,
                                                   user, error);
    return rc < 0 ? -1 : 0;
}
/* RPC: set an integer server config value (delegated to the config manager). */
int
seafile_set_server_config_int (const char *group, const char *key, int value,
                               GError **error)
{
    if (!group || !key) {
        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return -1;
    }

    return seaf_cfg_manager_set_config_int (seaf->cfg_mgr, group, key, value);
}

/* RPC: get an integer server config value. */
int
seafile_get_server_config_int (const char *group, const char *key, GError **error)
{
    if (!group || !key ) {
        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return -1;
    }

    return seaf_cfg_manager_get_config_int (seaf->cfg_mgr, group, key);
}

/* RPC: set a 64-bit integer server config value. */
int
seafile_set_server_config_int64 (const char *group, const char *key, gint64 value,
                                 GError **error)
{
    if (!group || !key) {
        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return -1;
    }

    return seaf_cfg_manager_set_config_int64 (seaf->cfg_mgr, group, key, value);
}

/* RPC: get a 64-bit integer server config value. */
gint64
seafile_get_server_config_int64 (const char *group, const char *key, GError **error)
{
    if (!group || !key ) {
        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return -1;
    }

    return seaf_cfg_manager_get_config_int64 (seaf->cfg_mgr, group, key);
}

/* RPC: set a string server config value. */
int
seafile_set_server_config_string (const char *group, const char *key, const char *value,
                                  GError **error)
{
    if (!group || !key || !value) {
        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return -1;
    }

    return seaf_cfg_manager_set_config_string (seaf->cfg_mgr, group, key, value);
}

/* RPC: get a string server config value. */
char *
seafile_get_server_config_string (const char *group, const char *key, GError **error)
{
    if (!group || !key ) {
        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return NULL;
    }

    return seaf_cfg_manager_get_config_string (seaf->cfg_mgr, group, key);
}

/* RPC: set a boolean server config value. */
int
seafile_set_server_config_boolean (const char *group, const char *key, int value,
                                   GError **error)
{
    if (!group || !key) {
        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return -1;
    }

    return seaf_cfg_manager_set_config_boolean (seaf->cfg_mgr, group, key, value);
}

/* RPC: get a boolean server config value. */
int
seafile_get_server_config_boolean (const char *group, const char *key, GError **error)
{
    if (!group || !key ) {
        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return -1;
    }

    return seaf_cfg_manager_get_config_boolean (seaf->cfg_mgr, group, key);
}
/* RPC: find the repo shared to a group that matches (repo_id, path).
 * `is_org` selects organization-scoped lookup. Delegated. */
GObject *
seafile_get_group_shared_repo_by_path (const char *repo_id,
                                       const char *path,
                                       int group_id,
                                       int is_org,
                                       GError **error)
{
    if (!repo_id || group_id < 0) {
        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Arguments error");
        return NULL;
    }
    SeafRepoManager *mgr = seaf->repo_mgr;

    return seaf_get_group_shared_repo_by_path (mgr, repo_id, path, group_id, is_org ? TRUE:FALSE, error);
}

/* RPC: find the repo shared to a user (`shared_to`) that matches
 * (repo_id, path). `is_org` selects organization-scoped lookup. Delegated. */
GObject *
seafile_get_shared_repo_by_path (const char *repo_id,
                                 const char *path,
                                 const char *shared_to,
                                 int is_org,
                                 GError **error)
{
    if (!repo_id || !shared_to) {
        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Arguments error");
        return NULL;
    }
    SeafRepoManager *mgr = seaf->repo_mgr;

    return seaf_get_shared_repo_by_path (mgr, repo_id, path, shared_to, is_org ? TRUE:FALSE, error);
}
/* RPC: list the group-shared repos visible to `user` (non-org: org_id -1). */
GList *
seafile_get_group_repos_by_user (const char *user, GError **error)
{
    if (!user) {
        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Arguments error");
        return NULL;
    }
    SeafRepoManager *mgr = seaf->repo_mgr;

    return seaf_get_group_repos_by_user (mgr, user, -1, error);
}

/* RPC: same as above but scoped to a specific organization. */
GList *
seafile_get_org_group_repos_by_user (const char *user, int org_id, GError **error)
{
    if (!user) {
        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Arguments error");
        return NULL;
    }
    SeafRepoManager *mgr = seaf->repo_mgr;

    return seaf_get_group_repos_by_user (mgr, user, org_id, error);
}
/* RPC: report whether a repo has been shared to anyone.
 * `including_groups` also counts group shares. Returns 1/0, or FALSE (0)
 * with `error` set on a NULL repo id. */
int
seafile_repo_has_been_shared (const char *repo_id, int including_groups, GError **error)
{
    if (repo_id == NULL) {
        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Arguments error");
        return FALSE;
    }

    /* Normalize the manager's gboolean answer to a plain 1/0. */
    if (seaf_share_manager_repo_has_been_shared (seaf->share_mgr, repo_id,
                                                 including_groups ? TRUE : FALSE))
        return 1;

    return 0;
}
/* RPC: list the users a repo has been shared to (delegated). */
GList *
seafile_get_shared_users_by_repo (const char *repo_id, GError **error)
{
    if (!repo_id) {
        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Arguments error");
        return NULL;
    }

    return seaf_share_manager_get_shared_users_by_repo (seaf->share_mgr,
                                                        repo_id);
}

/* RPC: organization-scoped variant of the above. */
GList *
seafile_org_get_shared_users_by_repo (int org_id,
                                      const char *repo_id,
                                      GError **error)
{
    if (!repo_id || org_id < 0) {
        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, "Arguments error");
        return NULL;
    }

    return seaf_share_manager_org_get_shared_users_by_repo (seaf->share_mgr,
                                                            org_id, repo_id);
}
/* Resumable file upload. */

/* RPC: return the byte offset already stored for a resumable upload of
 * `file_path` in `repo_id`, or -1 with `error` set on bad arguments. */
gint64
seafile_get_upload_tmp_file_offset (const char *repo_id, const char *file_path,
                                    GError **error)
{
    gint64 offset;
    char *canon_path;

    if (repo_id == NULL || !is_uuid_valid(repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid repo id");
        return -1;
    }

    /* The path must be present and non-empty. */
    if (file_path == NULL || file_path[0] == '\0') {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid file path");
        return -1;
    }

    canon_path = format_dir_path (file_path);
    offset = seaf_repo_manager_get_upload_tmp_file_offset (seaf->repo_mgr, repo_id,
                                                           canon_path, error);
    g_free (canon_path);

    return offset;
}
/* RPC: translate a (repo, path) pair as seen by `user` into the canonical
 * repo/path representation. `is_org` selects organization-scoped lookup.
 * Delegates to seaf_repo_manager_convert_repo_path(). Returns NULL with
 * `error` set on bad arguments. */
char *
seafile_convert_repo_path (const char *repo_id,
                           const char *path,
                           const char *user,
                           int is_org,
                           GError **error)
{
    if (!is_uuid_valid(repo_id) || !path || !user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments error");
        return NULL;
    }

    /* Normalize the client-supplied path before passing it on. */
    char *rpath = format_dir_path (path);

    char *ret = seaf_repo_manager_convert_repo_path(seaf->repo_mgr, repo_id, rpath, user, is_org ? TRUE : FALSE, error);
    g_free(rpath);

    return ret;
}
/* RPC: set a repo's status; `status` must be a valid enum value in
 * [0, N_REPO_STATUS). Delegated to the repo manager. */
int
seafile_set_repo_status(const char *repo_id, int status, GError **error)
{
    if (!is_uuid_valid(repo_id) ||
        status < 0 || status >= N_REPO_STATUS) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments error");
        return -1;
    }

    return seaf_repo_manager_set_repo_status(seaf->repo_mgr, repo_id, status);
}

/* RPC: get a repo's status. A manager result of -1 ("no record") is
 * normalized to 0. */
int
seafile_get_repo_status(const char *repo_id, GError **error)
{
    int status;

    if (!is_uuid_valid(repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments error");
        return -1;
    }

    status = seaf_repo_manager_get_repo_status(seaf->repo_mgr, repo_id);

    return (status == -1) ? 0 : status;
}
/* RPC: search the whole repo for files matching `str`; thin wrapper over
 * the path-scoped variant with path == NULL. */
GList *
seafile_search_files (const char *repo_id, const char *str, GError **error)
{
    return seafile_search_files_by_path (repo_id, NULL, str, error);
}
/* RPC: search files under `path` in a repo for names matching `str` and
 * return them as SeafileSearchResult objects. `path` may be NULL to search
 * the whole repo. Returns NULL with `error` set on an invalid repo id. */
GList *
seafile_search_files_by_path (const char *repo_id, const char *path, const char *str, GError **error)
{
    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    GList *file_list = seaf_fs_manager_search_files_by_path (seaf->fs_mgr, repo_id, path, str);
    GList *ret = NULL, *ptr;
    /* Convert each SearchResult into a GObject, consuming the input list. */
    for (ptr = file_list; ptr; ptr=ptr->next) {
        SearchResult *sr = ptr->data;
        SeafileSearchResult *search_result = seafile_search_result_new ();
        g_object_set (search_result, "path", sr->path, "size", sr->size,
                      "mtime", sr->mtime, "is_dir", sr->is_dir, NULL);

        ret = g_list_prepend (ret, search_result);
        g_free (sr->path);
        g_free (sr);
    }
    /* Fix: the list cells returned by the fs manager were leaked —
     * only the elements were freed. */
    g_list_free (file_list);

    return g_list_reverse (ret);
}
/*RPC functions merged from ccnet-server*/

/* RPC: create a new email/password user account (delegated to the
 * user manager). Returns -1 with `error` set on NULL arguments. */
int
ccnet_rpc_add_emailuser (const char *email, const char *passwd,
                         int is_staff, int is_active, GError **error)
{
    CcnetUserManager *user_mgr = seaf->user_mgr;
    int ret;

    if (!email || !passwd) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Email and passwd can not be NULL");
        return -1;
    }

    ret = ccnet_user_manager_add_emailuser (user_mgr, email, passwd,
                                            is_staff, is_active);

    return ret;
}

/* RPC: remove a user account. `source` identifies the account backend
 * (passed through to the user manager). */
int
ccnet_rpc_remove_emailuser (const char *source, const char *email, GError **error)
{
    CcnetUserManager *user_mgr = seaf->user_mgr;
    int ret;

    if (!email) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Email can not be NULL");
        return -1;
    }

    ret = ccnet_user_manager_remove_emailuser (user_mgr, source, email);

    return ret;
}
/* RPC: validate an email/password pair against the user database.
 * Returns the user manager's result, or -1 with `error` set on bad
 * arguments. */
int
ccnet_rpc_validate_emailuser (const char *email, const char *passwd, GError **error)
{
    CcnetUserManager *user_mgr = seaf->user_mgr;
    int ret;

    if (!email || !passwd) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Email and passwd can not be NULL");
        return -1;
    }

    /* Fix: an empty password used to return -1 without setting `error`,
     * unlike every other argument-validation failure in these RPCs. */
    if (passwd[0] == 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Email and passwd can not be NULL");
        return -1;
    }

    ret = ccnet_user_manager_validate_emailuser (user_mgr, email, passwd);

    return ret;
}
/* RPC: fetch a user object by email (delegated). */
GObject*
ccnet_rpc_get_emailuser (const char *email, GError **error)
{
    if (!email) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Email can not be NULL");
        return NULL;
    }

    CcnetUserManager *user_mgr = seaf->user_mgr;
    CcnetEmailUser *emailuser = NULL;

    emailuser = ccnet_user_manager_get_emailuser (user_mgr, email, error);

    return (GObject *)emailuser;
}

/* RPC: fetch a user object by email, importing it from an external
 * backend (e.g. LDAP) if not yet present locally (delegated). */
GObject*
ccnet_rpc_get_emailuser_with_import (const char *email, GError **error)
{
    if (!email) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Email can not be NULL");
        return NULL;
    }

    CcnetUserManager *user_mgr = seaf->user_mgr;
    CcnetEmailUser *emailuser = NULL;

    emailuser = ccnet_user_manager_get_emailuser_with_import (user_mgr, email, error);

    return (GObject *)emailuser;
}

/* RPC: fetch a user object by numeric database id (delegated). */
GObject*
ccnet_rpc_get_emailuser_by_id (int id, GError **error)
{
    CcnetUserManager *user_mgr = seaf->user_mgr;
    CcnetEmailUser *emailuser = NULL;

    emailuser = ccnet_user_manager_get_emailuser_by_id (user_mgr, id);

    return (GObject *)emailuser;
}
/* RPC: page through user accounts from `source`, optionally filtered by
 * `status` (delegated). */
GList*
ccnet_rpc_get_emailusers (const char *source,
                          int start, int limit,
                          const char *status,
                          GError **error)
{
    CcnetUserManager *user_mgr = seaf->user_mgr;
    GList *emailusers = NULL;

    emailusers = ccnet_user_manager_get_emailusers (user_mgr, source, start, limit, status);

    return emailusers;
}

/* RPC: search user accounts by email pattern with paging (delegated). */
GList*
ccnet_rpc_search_emailusers (const char *source,
                             const char *email_patt,
                             int start, int limit,
                             GError **error)
{
    CcnetUserManager *user_mgr = seaf->user_mgr;
    GList *emailusers = NULL;

    emailusers = ccnet_user_manager_search_emailusers (user_mgr,
                                                       source,
                                                       email_patt,
                                                       start, limit);

    return emailusers;
}

/* RPC: search groups by name pattern with paging (delegated). */
GList*
ccnet_rpc_search_groups (const char *group_patt,
                         int start, int limit,
                         GError **error)
{
    CcnetGroupManager *group_mgr = seaf->group_mgr;
    GList *groups = NULL;

    groups = ccnet_group_manager_search_groups (group_mgr,
                                                group_patt,
                                                start, limit);

    return groups;
}
/* RPC: search members of a group by pattern (delegated). */
GList *
ccnet_rpc_search_group_members (int group_id, const char *pattern, GError **error)
{
    CcnetGroupManager *group_mgr = seaf->group_mgr;
    GList *ret = NULL;

    ret = ccnet_group_manager_search_group_members (group_mgr, group_id, pattern);

    return ret;
}

/* RPC: list top-level groups; `including_org` also includes org groups
 * (delegated). */
GList*
ccnet_rpc_get_top_groups (int including_org, GError **error)
{
    CcnetGroupManager *group_mgr = seaf->group_mgr;
    GList *groups = NULL;

    groups = ccnet_group_manager_get_top_groups (group_mgr, including_org ? TRUE : FALSE, error);

    return groups;
}

/* RPC: list the direct child groups of a group (delegated). */
GList*
ccnet_rpc_get_child_groups (int group_id, GError **error)
{
    CcnetGroupManager *group_mgr = seaf->group_mgr;
    GList *groups = NULL;

    groups = ccnet_group_manager_get_child_groups (group_mgr, group_id, error);

    return groups;
}

/* RPC: list all descendant groups of a group, recursively (delegated). */
GList*
ccnet_rpc_get_descendants_groups(int group_id, GError **error)
{
    CcnetGroupManager *group_mgr = seaf->group_mgr;
    GList *groups = NULL;

    groups = ccnet_group_manager_get_descendants_groups (group_mgr, group_id, error);

    return groups;
}
/* RPC: count user accounts from `source` (delegated). */
gint64
ccnet_rpc_count_emailusers (const char *source, GError **error)
{
    CcnetUserManager *user_mgr = seaf->user_mgr;

    return ccnet_user_manager_count_emailusers (user_mgr, source);
}

/* RPC: count inactive user accounts from `source` (delegated). */
gint64
ccnet_rpc_count_inactive_emailusers (const char *source, GError **error)
{
    CcnetUserManager *user_mgr = seaf->user_mgr;

    return ccnet_user_manager_count_inactive_emailusers (user_mgr, source);
}

/* RPC: update a user's password / staff / active flags by id (delegated).
 * NOTE(review): no NULL check on `source`/`passwd` here, unlike the other
 * user RPCs — confirm the manager tolerates NULL. */
int
ccnet_rpc_update_emailuser (const char *source, int id, const char* passwd,
                            int is_staff, int is_active,
                            GError **error)
{
    CcnetUserManager *user_mgr = seaf->user_mgr;

    return ccnet_user_manager_update_emailuser(user_mgr, source, id, passwd,
                                               is_staff, is_active);
}

/* RPC: update a user's role (delegated). */
int
ccnet_rpc_update_role_emailuser (const char* email, const char* role,
                                 GError **error)
{
    CcnetUserManager *user_mgr = seaf->user_mgr;

    return ccnet_user_manager_update_role_emailuser(user_mgr, email, role);
}

/* RPC: list all superuser accounts (delegated). */
GList*
ccnet_rpc_get_superusers (GError **error)
{
    CcnetUserManager *user_mgr = seaf->user_mgr;

    return ccnet_user_manager_get_superusers(user_mgr);
}
/* RPC: fetch the user objects for a list of emails (delegated). */
GList *
ccnet_rpc_get_emailusers_in_list(const char *source, const char *user_list, GError **error)
{
    if (!user_list || !source) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return NULL;
    }

    CcnetUserManager *user_mgr = seaf->user_mgr;

    return ccnet_user_manager_get_emailusers_in_list (user_mgr, source, user_list, error);
}

/* RPC: rename a user account from `old_email` to `new_email` (delegated). */
int
ccnet_rpc_update_emailuser_id (const char *old_email, const char *new_email, GError **error)
{
    if (!old_email || !new_email) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return -1;
    }

    CcnetUserManager *user_mgr = seaf->user_mgr;

    return ccnet_user_manager_update_emailuser_id (user_mgr, old_email, new_email, error);
}
/* RPC: create a group owned by `user_name`; `parent_group_id` nests it
 * under an existing group. NOTE(review): the `type` argument is not used
 * here — confirm whether it is consumed elsewhere. Delegated. */
int
ccnet_rpc_create_group (const char *group_name, const char *user_name,
                        const char *type, int parent_group_id, GError **error)
{
    CcnetGroupManager *group_mgr = seaf->group_mgr;
    int ret;

    if (!group_name || !user_name) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Group name and user name can not be NULL");
        return -1;
    }

    ret = ccnet_group_manager_create_group (group_mgr, group_name, user_name, parent_group_id, error);

    return ret;
}

/* RPC: create a group within an organization (delegated). */
int
ccnet_rpc_create_org_group (int org_id, const char *group_name,
                            const char *user_name, int parent_group_id, GError **error)
{
    CcnetGroupManager *group_mgr = seaf->group_mgr;
    int ret;

    if (org_id < 0 || !group_name || !user_name) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad args");
        return -1;
    }

    ret = ccnet_group_manager_create_org_group (group_mgr, org_id,
                                                group_name, user_name, parent_group_id, error);

    return ret;
}
int
ccnet_rpc_remove_group (int group_id, GError **error)
{
CcnetGroupManager *group_mgr = seaf->group_mgr;
int ret;
if (group_id <= 0) {
g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
"Invalid group_id parameter");
return -1;
}
ret = ccnet_group_manager_remove_group (group_mgr, group_id, FALSE, error);
return ret;
}
int
ccnet_rpc_group_add_member (int group_id, const char *user_name,
const char *member_name, GError **error)
{
CcnetGroupManager *group_mgr = seaf->group_mgr;
int ret;
if (group_id <= 0 || !user_name || !member_name) {
g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
"Group id and user name and member name can not be NULL");
return -1;
}
ret = ccnet_group_manager_add_member (group_mgr, group_id, user_name, member_name,
error);
return ret;
}
int
ccnet_rpc_group_remove_member (int group_id, const char *user_name,
const char *member_name, GError **error)
{
CcnetGroupManager *group_mgr = seaf->group_mgr;
int ret;
if (!user_name || !member_name) {
g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
"User name and member name can not be NULL");
return -1;
}
ret = ccnet_group_manager_remove_member (group_mgr, group_id, user_name,
member_name, error);
return ret;
}
/* RPC: grant group admin to member_name. Returns 0 on success, -1 on error. */
int
ccnet_rpc_group_set_admin (int group_id, const char *member_name,
                           GError **error)
{
    if (group_id <= 0 || member_name == NULL) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return -1;
    }

    return ccnet_group_manager_set_admin (seaf->group_mgr, group_id,
                                          member_name, error);
}
/* RPC: revoke group admin from member_name. Returns 0 on success, -1 on error. */
int
ccnet_rpc_group_unset_admin (int group_id, const char *member_name,
                             GError **error)
{
    if (group_id <= 0 || member_name == NULL) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return -1;
    }

    return ccnet_group_manager_unset_admin (seaf->group_mgr, group_id,
                                            member_name, error);
}
/* RPC: rename a group. Returns 0 on success, -1 on error. */
int
ccnet_rpc_set_group_name (int group_id, const char *group_name,
                          GError **error)
{
    if (group_id <= 0 || group_name == NULL) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return -1;
    }

    return ccnet_group_manager_set_group_name (seaf->group_mgr, group_id,
                                               group_name, error);
}
/* RPC: user_name leaves group_id. Returns 0 on success, -1 on error. */
int
ccnet_rpc_quit_group (int group_id, const char *user_name, GError **error)
{
    if (group_id <= 0 || user_name == NULL) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Group id and user name can not be NULL");
        return -1;
    }

    return ccnet_group_manager_quit_group (seaf->group_mgr, group_id,
                                           user_name, error);
}
/* RPC: list the groups username belongs to; optionally include ancestor
 * groups. Returns a GList of groups, or NULL. */
GList *
ccnet_rpc_get_groups (const char *username, int return_ancestors, GError **error)
{
    if (username == NULL) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "User name can not be NULL");
        return NULL;
    }

    return ccnet_group_manager_get_groups_by_user (seaf->group_mgr, username,
                                                   return_ancestors ? TRUE : FALSE,
                                                   error);
}
/* RPC: list every department group. Returns a GList, or NULL. */
GList *
ccnet_rpc_list_all_departments (GError **error)
{
    return ccnet_group_manager_list_all_departments (seaf->group_mgr, error);
}
/* RPC: look up repos whose id starts with id_prefix and convert them to
 * RPC objects. The internal SeafRepo list is unref'd and freed here;
 * the caller owns the returned list. */
GList*
seafile_get_repos_by_id_prefix (const char *id_prefix, int start,
                                int limit, GError **error)
{
    GList *repos, *iter, *result;

    if (id_prefix == NULL) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Argument should not be null");
        return NULL;
    }

    repos = seaf_repo_manager_get_repos_by_id_prefix (seaf->repo_mgr, id_prefix,
                                                      start, limit);
    result = convert_repo_list (repos);

    for (iter = repos; iter != NULL; iter = iter->next)
        seaf_repo_unref ((SeafRepo *)iter->data);
    g_list_free (repos);

    return result;
}
/* RPC: page through all groups. The `source` argument is accepted for
 * API compatibility but not used by the manager call. */
GList *
ccnet_rpc_get_all_groups (int start, int limit,
                          const char *source, GError **error)
{
    return ccnet_group_manager_get_all_groups (seaf->group_mgr,
                                               start, limit, error);
}
/* RPC: list the ancestor groups of group_id. */
GList *
ccnet_rpc_get_ancestor_groups (int group_id, GError ** error)
{
    return ccnet_group_manager_get_ancestor_groups (seaf->group_mgr, group_id);
}
/* RPC: fetch a single group object by id, or NULL if not found. */
GObject *
ccnet_rpc_get_group (int group_id, GError **error)
{
    CcnetGroup *group = ccnet_group_manager_get_group (seaf->group_mgr,
                                                       group_id, error);
    if (group == NULL)
        return NULL;

    return (GObject *)group;
}
/* RPC: page through the members of a group. Negative start is clamped
 * to 0; the manager's list is reversed before returning. */
GList *
ccnet_rpc_get_group_members (int group_id, int start, int limit, GError **error)
{
    GList *members;

    if (start < 0)
        start = 0;

    members = ccnet_group_manager_get_group_members (seaf->group_mgr, group_id,
                                                     start, limit, error);
    if (members == NULL)
        return NULL;

    return g_list_reverse (members);
}
/* RPC: list group members whose name starts with prefix. */
GList *
ccnet_rpc_get_members_with_prefix(int group_id, const char *prefix, GError **error)
{
    return ccnet_group_manager_get_members_with_prefix (seaf->group_mgr,
                                                        group_id, prefix, error);
}
/* RPC: check whether user_name is a staff (admin) of group_id;
 * in_structure extends the check to the group hierarchy.
 * Returns the manager's result, or -1 on bad arguments. */
int
ccnet_rpc_check_group_staff (int group_id, const char *user_name, int in_structure,
                             GError **error)
{
    if (group_id <= 0 || user_name == NULL) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,
                     "Bad arguments");
        return -1;
    }

    return ccnet_group_manager_check_group_staff (seaf->group_mgr, group_id,
                                                  user_name,
                                                  in_structure ? TRUE : FALSE);
}
/* RPC: remove a user from every group. Returns 0/-1. */
int
ccnet_rpc_remove_group_user (const char *user, GError **error)
{
    if (user == NULL) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return -1;
    }

    return ccnet_group_manager_remove_group_user (seaf->group_mgr, user);
}
/* RPC: is `user` a member of group_id (optionally through the group
 * hierarchy)? Returns 0 on bad arguments. */
int
ccnet_rpc_is_group_user (int group_id, const char *user, int in_structure, GError **error)
{
    if (user == NULL || group_id < 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return 0;
    }

    return ccnet_group_manager_is_group_user (seaf->group_mgr, group_id, user,
                                              in_structure ? TRUE : FALSE);
}
/* RPC: change the recorded creator of a group. Returns 0/-1. */
int
ccnet_rpc_set_group_creator (int group_id, const char *user_name,
                             GError **error)
{
    if (user_name == NULL || group_id < 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return -1;
    }

    return ccnet_group_manager_set_group_creator (seaf->group_mgr, group_id,
                                                  user_name);
}
/* RPC: list the members of several groups at once; group_ids is a
 * non-empty id list string. */
GList *
ccnet_rpc_get_groups_members (const char *group_ids, GError **error)
{
    if (group_ids == NULL || g_strcmp0 (group_ids, "") == 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return NULL;
    }

    return ccnet_group_manager_get_groups_members (seaf->group_mgr,
                                                   group_ids, error);
}
/* RPC: create an organization. Returns the manager's result, -1 on
 * bad arguments. */
int
ccnet_rpc_create_org (const char *org_name, const char *url_prefix,
                      const char *creator, GError **error)
{
    if (org_name == NULL || url_prefix == NULL || creator == NULL) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_create_org (seaf->org_mgr, org_name, url_prefix,
                                         creator, error);
}
/* RPC: remove an organization. Deletes every user account belonging to
 * the org, removes every org group, then deletes the org record itself.
 * Returns 0 on success, -1 on error. */
int
ccnet_rpc_remove_org (int org_id, GError **error)
{
GList *group_ids = NULL, *email_list=NULL, *ptr;
const char *url_prefix = NULL;
CcnetOrgManager *org_mgr = seaf->org_mgr;
CcnetUserManager *user_mgr = seaf->user_mgr;
CcnetGroupManager *group_mgr = seaf->group_mgr;
if (org_id < 0) {
g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
return -1;
}
/* Org users are looked up by the org's url prefix.
 * NOTE(review): url_prefix is neither NULL-checked nor freed here;
 * if the getter allocates, this leaks -- confirm against
 * ccnet_org_manager_get_url_prefix_by_org_id. */
url_prefix = ccnet_org_manager_get_url_prefix_by_org_id (org_mgr, org_id,
error);
email_list = ccnet_org_manager_get_org_emailusers (org_mgr, url_prefix,
0, INT_MAX);
/* Delete each user account from the "DB" backend. */
ptr = email_list;
while (ptr) {
ccnet_user_manager_remove_emailuser (user_mgr, "DB", (gchar *)ptr->data);
ptr = ptr->next;
}
string_list_free (email_list);
/* Remove every group of the org (TRUE flag -- see
 * ccnet_group_manager_remove_group for its meaning). Group ids are
 * stored as integers packed into the list's pointer slots. */
group_ids = ccnet_org_manager_get_org_group_ids (org_mgr, org_id, 0, INT_MAX);
ptr = group_ids;
while (ptr) {
ccnet_group_manager_remove_group (group_mgr, (int)(long)ptr->data, TRUE, error);
ptr = ptr->next;
}
g_list_free (group_ids);
return ccnet_org_manager_remove_org (org_mgr, org_id, error);
}
/* RPC: page through all organizations. */
GList *
ccnet_rpc_get_all_orgs (int start, int limit, GError **error)
{
    return ccnet_org_manager_get_all_orgs (seaf->org_mgr, start, limit);
}
/* RPC: total number of organizations. */
gint64
ccnet_rpc_count_orgs (GError **error)
{
    return ccnet_org_manager_count_orgs (seaf->org_mgr);
}
/* RPC: fetch an organization object by its url prefix, or NULL. */
GObject *
ccnet_rpc_get_org_by_url_prefix (const char *url_prefix, GError **error)
{
    CcnetOrganization *org;

    if (url_prefix == NULL) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return NULL;
    }

    org = ccnet_org_manager_get_org_by_url_prefix (seaf->org_mgr,
                                                   url_prefix, error);
    return org ? (GObject *)org : NULL;
}
/* RPC: fetch an organization object by id, or NULL. */
GObject *
ccnet_rpc_get_org_by_id (int org_id, GError **error)
{
    CcnetOrganization *org;

    if (org_id <= 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return NULL;
    }

    org = ccnet_org_manager_get_org_by_id (seaf->org_mgr, org_id, error);
    return org ? (GObject *)org : NULL;
}
/* RPC: add a user to an org, optionally as staff. Returns 0/-1. */
int
ccnet_rpc_add_org_user (int org_id, const char *email, int is_staff,
                        GError **error)
{
    if (org_id < 0 || email == NULL) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_add_org_user (seaf->org_mgr, org_id, email,
                                           is_staff, error);
}
/* RPC: remove a user from an org. Returns 0/-1. */
int
ccnet_rpc_remove_org_user (int org_id, const char *email, GError **error)
{
    if (org_id < 0 || email == NULL) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_remove_org_user (seaf->org_mgr, org_id,
                                              email, error);
}
/* RPC: list the organizations a user belongs to. */
GList *
ccnet_rpc_get_orgs_by_user (const char *email, GError **error)
{
    return ccnet_org_manager_get_orgs_by_user (seaf->org_mgr, email, error);
}
/* RPC: page through an org's users (looked up via its url prefix) and
 * resolve each email to a CcnetEmailUser object. Emails with no
 * resolvable user are silently skipped. Caller owns the result. */
GList *
ccnet_rpc_get_org_emailusers (const char *url_prefix, int start , int limit,
                              GError **error)
{
    GList *emails, *iter, *users = NULL;

    if (url_prefix == NULL) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return NULL;
    }

    emails = ccnet_org_manager_get_org_emailusers (seaf->org_mgr, url_prefix,
                                                   start, limit);
    if (emails == NULL)
        return NULL;

    for (iter = emails; iter != NULL; iter = iter->next) {
        CcnetEmailUser *user =
            ccnet_user_manager_get_emailuser (seaf->user_mgr,
                                              (char *)iter->data, NULL);
        if (user != NULL)
            users = g_list_prepend (users, user);
    }

    string_list_free (emails);

    /* Prepend + reverse keeps the manager's ordering. */
    return g_list_reverse (users);
}
/* RPC: attach an existing group to an org. Returns 0/-1. */
int
ccnet_rpc_add_org_group (int org_id, int group_id, GError **error)
{
    if (org_id < 0 || group_id < 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_add_org_group (seaf->org_mgr, org_id,
                                            group_id, error);
}
/* RPC: detach a group from an org. Returns 0/-1. */
int
ccnet_rpc_remove_org_group (int org_id, int group_id, GError **error)
{
    if (org_id < 0 || group_id < 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_remove_org_group (seaf->org_mgr, org_id,
                                               group_id, error);
}
/* RPC: does group_id belong to any org? Returns -1 on bad arguments. */
int
ccnet_rpc_is_org_group (int group_id, GError **error)
{
    if (group_id <= 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_is_org_group (seaf->org_mgr, group_id, error);
}
/* RPC: resolve which org a group belongs to. Returns -1 on bad arguments. */
int
ccnet_rpc_get_org_id_by_group (int group_id, GError **error)
{
    if (group_id <= 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_get_org_id_by_group (seaf->org_mgr,
                                                  group_id, error);
}
/* RPC: page through the groups of an org. Negative start is clamped
 * to 0. */
GList *
ccnet_rpc_get_org_groups (int org_id, int start, int limit, GError **error)
{
    if (org_id < 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return NULL;
    }

    if (start < 0)
        start = 0;

    return ccnet_org_manager_get_org_groups (seaf->org_mgr, org_id,
                                             start, limit);
}
/* RPC: list the org groups a user belongs to within one org. */
GList *
ccnet_rpc_get_org_groups_by_user (const char *user, int org_id, GError **error)
{
    if (org_id < 0 || user == NULL) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return NULL;
    }

    return ccnet_org_manager_get_org_groups_by_user (seaf->org_mgr,
                                                     user, org_id);
}
/* RPC: list an org's top-level groups. */
GList *
ccnet_rpc_get_org_top_groups (int org_id, GError **error)
{
    if (org_id < 0) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return NULL;
    }

    return ccnet_org_manager_get_org_top_groups (seaf->org_mgr, org_id, error);
}
/* RPC: is `email` a member of org_id? Returns -1 on bad arguments. */
int
ccnet_rpc_org_user_exists (int org_id, const char *email, GError **error)
{
    if (org_id < 0 || email == NULL) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_org_user_exists (seaf->org_mgr, org_id,
                                              email, error);
}
/* RPC: is `email` a staff member of org_id? Returns -1 on bad arguments. */
int
ccnet_rpc_is_org_staff (int org_id, const char *email, GError **error)
{
    if (org_id < 0 || email == NULL) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_is_org_staff (seaf->org_mgr, org_id, email, error);
}
/* RPC: grant org staff to `email`. Returns 0/-1. */
int
ccnet_rpc_set_org_staff (int org_id, const char *email, GError **error)
{
    if (org_id < 0 || email == NULL) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_set_org_staff (seaf->org_mgr, org_id, email, error);
}
/* RPC: revoke org staff from `email`. Returns 0/-1. */
int
ccnet_rpc_unset_org_staff (int org_id, const char *email, GError **error)
{
    if (org_id < 0 || email == NULL) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_unset_org_staff (seaf->org_mgr, org_id,
                                              email, error);
}
/* RPC: rename an org. Returns 0/-1. */
int
ccnet_rpc_set_org_name (int org_id, const char *org_name, GError **error)
{
    if (org_id < 0 || org_name == NULL) {
        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Bad arguments");
        return -1;
    }

    return ccnet_org_manager_set_org_name (seaf->org_mgr, org_id,
                                           org_name, error);
}
#endif /* SEAFILE_SERVER */
================================================
FILE: common/seaf-db.c
================================================
#include "common.h"
#include "log.h"
#include "seaf-db.h"

#include <stdarg.h>

#ifdef HAVE_MYSQL
#include <mysql.h>
#include <errmsg.h>
#endif

#include <pthread.h>
#include <unistd.h>
/* A pool of reusable backend connections. max_connections == 0 means
 * pooling is disabled and every connection is created/closed per use. */
struct DBConnPool {
GPtrArray *connections;
pthread_mutex_t lock;
int max_connections;
};
typedef struct DBConnPool DBConnPool;
/* Opaque DB handle; `type` is one of the SEAF_DB_TYPE_* constants. */
struct SeafDB {
int type;
DBConnPool *pool;
};
/* Base "class" for backend connections. Backend-specific connection
 * structs embed this as their first member and are cast back and forth. */
typedef struct DBConnection {
gboolean is_available;      /* idle in the pool, free to hand out */
gboolean delete_pending;    /* broken; remove from pool when convenient */
DBConnPool *pool;
} DBConnection;
/* Opaque row handle; backends define their own layout. */
struct SeafDBRow {
/* Empty */
};
/* An open transaction: the connection it runs on, and whether that
 * connection must be closed (not recycled) when the trans ends. */
struct SeafDBTrans {
DBConnection *conn;
gboolean need_close;
};
/* Virtual dispatch table; filled in by seaf_db_new_mysql/sqlite. */
typedef struct DBOperations {
DBConnection* (*get_connection)(SeafDB *db);
void (*release_connection)(DBConnection *conn, gboolean need_close);
int (*execute_sql_no_stmt)(DBConnection *conn, const char *sql, gboolean *retry);
int (*execute_sql)(DBConnection *conn, const char *sql,
int n, va_list args, gboolean *retry);
int (*query_foreach_row)(DBConnection *conn,
const char *sql, SeafDBRowFunc callback, void *data,
int n, va_list args, gboolean *retry);
int (*row_get_column_count)(SeafDBRow *row);
const char* (*row_get_column_string)(SeafDBRow *row, int idx);
int (*row_get_column_int)(SeafDBRow *row, int idx);
gint64 (*row_get_column_int64)(SeafDBRow *row, int idx);
} DBOperations;
/* Process-wide backend dispatch table: only one DB backend is active
 * per process. */
static DBOperations db_ops;
#ifdef HAVE_MYSQL
/* MySQL Ops */
/* Forward declarations for the MySQL backend; definitions are further
 * down in this file. */
static SeafDB *
mysql_db_new (const char *host,
int port,
const char *user,
const char *password,
const char *db_name,
const char *unix_socket,
gboolean use_ssl,
gboolean skip_verify,
const char *ca_path,
const char *charset);
static DBConnection *
mysql_db_get_connection (SeafDB *db);
static void
mysql_db_release_connection (DBConnection *vconn);
static int
mysql_db_execute_sql_no_stmt (DBConnection *vconn, const char *sql, gboolean *retry);
static int
mysql_db_execute_sql (DBConnection *vconn, const char *sql, int n, va_list args, gboolean *retry);
static int
mysql_db_query_foreach_row (DBConnection *vconn, const char *sql,
SeafDBRowFunc callback, void *data,
int n, va_list args, gboolean *retry);
static int
mysql_db_row_get_column_count (SeafDBRow *row);
static const char *
mysql_db_row_get_column_string (SeafDBRow *row, int idx);
static int
mysql_db_row_get_column_int (SeafDBRow *row, int idx);
static gint64
mysql_db_row_get_column_int64 (SeafDBRow *row, int idx);
static gboolean
mysql_db_connection_ping (DBConnection *vconn);
/* Allocate an empty connection pool sized for max_connections. */
static DBConnPool *
init_conn_pool_common (int max_connections)
{
    DBConnPool *p = g_new0 (DBConnPool, 1);

    p->max_connections = max_connections;
    p->connections = g_ptr_array_sized_new (max_connections);
    pthread_mutex_init (&p->lock, NULL);

    return p;
}
/* Hand out a MySQL connection. With pooling disabled (max_connections
 * == 0) a fresh connection is created per call. Otherwise the pool is
 * scanned for an idle, still-alive connection; dead ones are marked
 * delete_pending and reaped at the end, and a new connection is opened
 * only if the pool is not full. Returns NULL when no connection can be
 * obtained. */
static DBConnection *
mysql_conn_pool_get_connection (SeafDB *db)
{
    DBConnPool *pool = db->pool;
    DBConnection *conn = NULL;
    DBConnection *d_conn = NULL;

    if (pool->max_connections == 0) {
        conn = mysql_db_get_connection (db);
        /* Fix: mysql_db_get_connection() returns NULL when the connect
         * fails; the original dereferenced it unconditionally. */
        if (conn)
            conn->pool = pool;
        return conn;
    }

    pthread_mutex_lock (&pool->lock);
    guint i, size = pool->connections->len;
    for (i = 0; i < size; ++i) {
        conn = g_ptr_array_index (pool->connections, i);
        if (!conn->is_available) {
            continue;
        }
        if (mysql_db_connection_ping (conn)) {
            /* Alive and idle: claim it. */
            conn->is_available = FALSE;
            goto out;
        }
        /* Dead connection: mark it for removal below. */
        conn->is_available = FALSE;
        conn->delete_pending = TRUE;
    }
    conn = NULL;
    if (size < pool->max_connections) {
        conn = mysql_db_get_connection (db);
        if (conn) {
            conn->pool = pool;
            conn->is_available = FALSE;
            g_ptr_array_add (pool->connections, conn);
        }
    }

out:
    /* Reap connections flagged delete_pending; iterate backwards so
     * removal doesn't disturb un-visited indices. */
    size = pool->connections->len;
    if (size > 0) {
        int index;
        for (index = size - 1; index >= 0; index--) {
            d_conn = g_ptr_array_index (pool->connections, index);
            if (d_conn->delete_pending) {
                g_ptr_array_remove (pool->connections, d_conn);
                mysql_db_release_connection (d_conn);
            }
        }
    }
    pthread_mutex_unlock (&pool->lock);

    return conn;
}
/* Return a connection to its pool: recycle it when healthy, destroy it
 * when need_close is set or pooling is disabled. */
static void
mysql_conn_pool_release_connection (DBConnection *conn, gboolean need_close)
{
    DBConnPool *pool;

    if (conn == NULL)
        return;

    pool = conn->pool;

    /* Unpooled mode: every connection is closed on release. */
    if (pool->max_connections == 0) {
        mysql_db_release_connection (conn);
        return;
    }

    if (need_close) {
        /* Broken connection: drop it from the pool and destroy it. */
        pthread_mutex_lock (&pool->lock);
        g_ptr_array_remove (pool->connections, conn);
        pthread_mutex_unlock (&pool->lock);
        mysql_db_release_connection (conn);
    } else {
        /* Healthy connection: mark it idle for reuse. */
        pthread_mutex_lock (&pool->lock);
        conn->is_available = TRUE;
        pthread_mutex_unlock (&pool->lock);
    }
}
#define KEEPALIVE_INTERVAL 30
/* Pool keepalive thread: every KEEPALIVE_INTERVAL seconds, run a cheap
 * query on each idle connection; connections that fail are flagged and
 * reaped. Runs forever (the thread is detached by the creator). */
static void *
mysql_conn_keepalive (void *arg)
{
    DBConnPool *pool = arg;
    DBConnection *conn = NULL;
    DBConnection *d_conn = NULL;
    const char *sql = "SELECT 1;";
    int rc = 0;

    while (1) {
        pthread_mutex_lock (&pool->lock);
        guint i, size = pool->connections->len;
        for (i = 0; i < size; ++i) {
            conn = g_ptr_array_index (pool->connections, i);
            if (conn->is_available) {
                /* Fix: use the no-stmt variant. The original declared a
                 * va_list, never called va_start on it, and passed it to
                 * execute_sql -- undefined behavior even with n == 0. */
                rc = db_ops.execute_sql_no_stmt (conn, sql, NULL);
                if (rc < 0) {
                    conn->is_available = FALSE;
                    conn->delete_pending = TRUE;
                }
            }
        }
        /* Reap failed connections, iterating backwards so removal is safe. */
        if (size > 0) {
            int index;
            for (index = size - 1; index >= 0; index--) {
                d_conn = g_ptr_array_index (pool->connections, index);
                if (d_conn->delete_pending) {
                    g_ptr_array_remove (pool->connections, d_conn);
                    mysql_db_release_connection (d_conn);
                }
            }
        }
        pthread_mutex_unlock (&pool->lock);

        sleep (KEEPALIVE_INTERVAL);
    }

    return NULL;
}
/* Create a MySQL-backed SeafDB: build the handle, install the MySQL
 * backend into db_ops, create the connection pool, and start the
 * keepalive thread. Returns NULL on failure.
 * NOTE(review): on pthread_create failure the already-allocated db and
 * pool are leaked; acceptable since the caller treats this as fatal,
 * but worth confirming. */
SeafDB *
seaf_db_new_mysql (const char *host,
int port,
const char *user,
const char *passwd,
const char *db_name,
const char *unix_socket,
gboolean use_ssl,
gboolean skip_verify,
const char *ca_path,
const char *charset,
int max_connections)
{
SeafDB *db;
db = mysql_db_new (host, port, user, passwd, db_name, unix_socket, use_ssl, skip_verify, ca_path, charset);
if (!db)
return NULL;
db->type = SEAF_DB_TYPE_MYSQL;
db_ops.get_connection = mysql_conn_pool_get_connection;
db_ops.release_connection = mysql_conn_pool_release_connection;
db_ops.execute_sql_no_stmt = mysql_db_execute_sql_no_stmt;
db_ops.execute_sql = mysql_db_execute_sql;
db_ops.query_foreach_row = mysql_db_query_foreach_row;
db_ops.row_get_column_count = mysql_db_row_get_column_count;
db_ops.row_get_column_string = mysql_db_row_get_column_string;
db_ops.row_get_column_int = mysql_db_row_get_column_int;
db_ops.row_get_column_int64 = mysql_db_row_get_column_int64;
db->pool = init_conn_pool_common (max_connections);
pthread_t tid;
int ret = pthread_create (&tid, NULL, mysql_conn_keepalive, db->pool);
if (ret != 0) {
seaf_warning ("Failed to create mysql connection keepalive thread.\n");
return NULL;
}
pthread_detach (tid);
return db;
}
#endif
/* SQLite Ops */
/* Forward declarations for the SQLite backend; definitions live further
 * down in this file (past this chunk). */
static SeafDB *
sqlite_db_new (const char *db_path);
static DBConnection *
sqlite_db_get_connection (SeafDB *db);
static void
sqlite_db_release_connection (DBConnection *vconn, gboolean need_close);
static int
sqlite_db_execute_sql_no_stmt (DBConnection *vconn, const char *sql, gboolean *retry);
static int
sqlite_db_execute_sql (DBConnection *vconn, const char *sql, int n, va_list args, gboolean *retry);
static int
sqlite_db_query_foreach_row (DBConnection *vconn, const char *sql,
SeafDBRowFunc callback, void *data,
int n, va_list args, gboolean *retry);
static int
sqlite_db_row_get_column_count (SeafDBRow *row);
static const char *
sqlite_db_row_get_column_string (SeafDBRow *row, int idx);
static int
sqlite_db_row_get_column_int (SeafDBRow *row, int idx);
static gint64
sqlite_db_row_get_column_int64 (SeafDBRow *row, int idx);
SeafDB *
seaf_db_new_sqlite (const char *db_path, int max_connections)
{
SeafDB *db;
db = sqlite_db_new (db_path);
if (!db)
return NULL;
db->type = SEAF_DB_TYPE_SQLITE;
db_ops.get_connection = sqlite_db_get_connection;
db_ops.release_connection = sqlite_db_release_connection;
db_ops.execute_sql_no_stmt = sqlite_db_execute_sql_no_stmt;
db_ops.execute_sql = sqlite_db_execute_sql;
db_ops.query_foreach_row = sqlite_db_query_foreach_row;
db_ops.row_get_column_count = sqlite_db_row_get_column_count;
db_ops.row_get_column_string = sqlite_db_row_get_column_string;
db_ops.row_get_column_int = sqlite_db_row_get_column_int;
db_ops.row_get_column_int64 = sqlite_db_row_get_column_int64;
return db;
}
/* Return the backend type (SEAF_DB_TYPE_*) of this DB handle. */
int
seaf_db_type (SeafDB *db)
{
return db->type;
}
/* Execute a raw (non-parameterized) SQL statement. On a lost-connection
 * error the backend sets `retry`, and the statement is re-run on a
 * fresh connection up to 3 times. Failed connections are closed on
 * release (need_close = ret < 0). Returns the backend's result; -1 on
 * error. */
int
seaf_db_query (SeafDB *db, const char *sql)
{
int ret = -1;
int retry_count = 0;
while (ret < 0) {
gboolean retry = FALSE;
DBConnection *conn = db_ops.get_connection (db);
if (!conn)
return -1;
ret = db_ops.execute_sql_no_stmt (conn, sql, &retry);
db_ops.release_connection (conn, ret < 0);
if (!retry || retry_count >= 3) {
break;
}
retry_count++;
seaf_warning ("The mysql connection has expired, creating a new connection to re-query.\n");
}
return ret;
}
/* Convenience wrapper: does `sql` return at least one row? Sets *db_err
 * on database failure. */
gboolean
seaf_db_check_for_existence (SeafDB *db, const char *sql, gboolean *db_err)
{
return seaf_db_statement_exists (db, sql, db_err, 0);
}
/* Convenience wrapper: run `sql` with no bound parameters and invoke
 * callback per row. Returns the row count, or -1 on error. */
int
seaf_db_foreach_selected_row (SeafDB *db, const char *sql,
SeafDBRowFunc callback, void *data)
{
return seaf_db_statement_foreach_row (db, sql, callback, data, 0);
}
/* Column accessor: string value of column idx; NULL if idx is out of
 * range. The returned pointer is owned by the row. */
const char *
seaf_db_row_get_column_text (SeafDBRow *row, guint32 idx)
{
g_return_val_if_fail (idx < db_ops.row_get_column_count(row), NULL);
return db_ops.row_get_column_string (row, idx);
}
/* Column accessor: int value of column idx; -1 if idx is out of range. */
int
seaf_db_row_get_column_int (SeafDBRow *row, guint32 idx)
{
g_return_val_if_fail (idx < db_ops.row_get_column_count(row), -1);
return db_ops.row_get_column_int (row, idx);
}
/* Column accessor: 64-bit int value of column idx; -1 if out of range. */
gint64
seaf_db_row_get_column_int64 (SeafDBRow *row, guint32 idx)
{
g_return_val_if_fail (idx < db_ops.row_get_column_count(row), -1);
return db_ops.row_get_column_int64 (row, idx);
}
/* Run `sql` (no parameters) and return the first column of the first
 * row as an int; -1 on error or no rows. */
int
seaf_db_get_int (SeafDB *db, const char *sql)
{
return seaf_db_statement_get_int (db, sql, 0);
}
/* Run `sql` (no parameters) and return the first column of the first
 * row as a gint64; -1 on error or no rows. */
gint64
seaf_db_get_int64 (SeafDB *db, const char *sql)
{
return seaf_db_statement_get_int64 (db, sql, 0);
}
/* Run `sql` (no parameters) and return the first column of the first
 * row as a newly allocated string (caller frees); NULL on error or no
 * rows. */
char *
seaf_db_get_string (SeafDB *db, const char *sql)
{
return seaf_db_statement_get_string (db, sql, 0);
}
/* Execute a parameterized statement; n is the number of (type, value)
 * varargs pairs consumed by the backend. On a lost connection the
 * statement is retried on a new connection up to 3 times (va_start is
 * re-done each iteration so the args can be re-read). Returns the
 * backend's result; -1 on error. */
int
seaf_db_statement_query (SeafDB *db, const char *sql, int n, ...)
{
int ret = -1;
int retry_count = 0;
while (ret < 0) {
gboolean retry = FALSE;
DBConnection *conn = db_ops.get_connection (db);
if (!conn)
return -1;
va_list args;
va_start (args, n);
ret = db_ops.execute_sql (conn, sql, n, args, &retry);
va_end (args);
db_ops.release_connection (conn, ret < 0);
if (!retry || retry_count >= 3) {
break;
}
retry_count++;
seaf_warning ("The mysql connection has expired, creating a new connection to re-query.\n");
}
return ret;
}
/* Does the parameterized query return any rows? Sets *db_err TRUE on
 * database failure (including failure to obtain a connection), FALSE
 * otherwise. Retries up to 3 times on a lost connection. */
gboolean
seaf_db_statement_exists (SeafDB *db, const char *sql, gboolean *db_err, int n, ...)
{
int n_rows = -1;
int retry_count = 0;
while (n_rows < 0) {
gboolean retry = FALSE;
DBConnection *conn = db_ops.get_connection(db);
if (!conn) {
*db_err = TRUE;
return FALSE;
}
va_list args;
va_start (args, n);
n_rows = db_ops.query_foreach_row (conn, sql, NULL, NULL, n, args, &retry);
va_end (args);
db_ops.release_connection(conn, n_rows < 0);
if (!retry || retry_count >= 3) {
break;
}
retry_count++;
seaf_warning ("The mysql connection has expired, creating a new connection to re-query.\n");
}
if (n_rows < 0) {
*db_err = TRUE;
return FALSE;
} else {
*db_err = FALSE;
return (n_rows != 0);
}
}
/* Run a parameterized query and invoke callback for each result row.
 * Retries up to 3 times on a lost connection. Returns the row count
 * reported by the backend, or -1 on error. */
int
seaf_db_statement_foreach_row (SeafDB *db, const char *sql,
SeafDBRowFunc callback, void *data,
int n, ...)
{
int ret = -1;
int retry_count = 0;
while (ret < 0) {
gboolean retry = FALSE;
DBConnection *conn = db_ops.get_connection (db);
if (!conn)
return -1;
va_list args;
va_start (args, n);
ret = db_ops.query_foreach_row (conn, sql, callback, data, n, args, &retry);
va_end (args);
db_ops.release_connection (conn, ret < 0);
if (!retry || retry_count >= 3) {
break;
}
retry_count++;
seaf_warning ("The mysql connection has expired, creating a new connection to re-query.\n");
}
return ret;
}
/* Row callback: capture column 0 as an int; stop after the first row. */
static gboolean
get_int_cb (SeafDBRow *row, void *data)
{
    *(int *)data = seaf_db_row_get_column_int (row, 0);
    return FALSE;   /* only the first row is wanted */
}
/* Run a parameterized query and return column 0 of the first row as an
 * int; -1 on error or when no row is returned. Retries up to 3 times
 * on a lost connection. */
int
seaf_db_statement_get_int (SeafDB *db, const char *sql, int n, ...)
{
int ret = -1;
int rc = -1;
int retry_count = 0;
while (rc < 0) {
gboolean retry = FALSE;
DBConnection *conn = db_ops.get_connection (db);
if (!conn)
return -1;
va_list args;
va_start (args, n);
rc = db_ops.query_foreach_row (conn, sql, get_int_cb, &ret, n, args, &retry);
va_end (args);
db_ops.release_connection (conn, rc < 0);
if (!retry || retry_count >= 3) {
break;
}
retry_count++;
seaf_warning ("The mysql connection has expired, creating a new connection to re-query.\n");
}
return ret;
}
/* Row callback: capture column 0 as a gint64; stop after the first row. */
static gboolean
get_int64_cb (SeafDBRow *row, void *data)
{
    *(gint64 *)data = seaf_db_row_get_column_int64 (row, 0);
    return FALSE;   /* only the first row is wanted */
}
/* Run a parameterized query and return column 0 of the first row as a
 * gint64; -1 on error or when no row is returned. Retries up to 3
 * times on a lost connection. */
gint64
seaf_db_statement_get_int64 (SeafDB *db, const char *sql, int n, ...)
{
gint64 ret = -1;
int rc = -1;
int retry_count = 0;
while (rc < 0) {
gboolean retry = FALSE;
DBConnection *conn = db_ops.get_connection (db);
if (!conn)
return -1;
va_list args;
va_start (args, n);
rc = db_ops.query_foreach_row (conn, sql, get_int64_cb, &ret, n, args, &retry);
va_end(args);
db_ops.release_connection (conn, rc < 0);
if (!retry || retry_count >= 3) {
break;
}
retry_count++;
seaf_warning ("The mysql connection has expired, creating a new connection to re-query.\n");
}
return ret;
}
/* Row callback: duplicate column 0 into the caller's char*; stop after
 * the first row. g_strdup(NULL) yields NULL, so a NULL column is safe. */
static gboolean
get_string_cb (SeafDBRow *row, void *data)
{
    *(char **)data = g_strdup (seaf_db_row_get_column_text (row, 0));
    return FALSE;   /* only the first row is wanted */
}
/* Run a parameterized query and return column 0 of the first row as a
 * newly allocated string (caller frees); NULL on error or when no row
 * is returned. Retries up to 3 times on a lost connection. */
char *
seaf_db_statement_get_string (SeafDB *db, const char *sql, int n, ...)
{
char *ret = NULL;
int rc = -1;
int retry_count = 0;
while (rc < 0) {
gboolean retry = FALSE;
DBConnection *conn = db_ops.get_connection (db);
if (!conn)
return NULL;
va_list args;
va_start (args, n);
rc = db_ops.query_foreach_row (conn, sql, get_string_cb, &ret, n, args, &retry);
va_end(args);
db_ops.release_connection (conn, rc < 0);
if (!retry || retry_count >= 3) {
break;
}
retry_count++;
seaf_warning ("The mysql connection has expired, creating a new connection to re-query.\n");
}
return ret;
}
/* Transaction */
/* Start a transaction: grab a connection and issue BEGIN. The
 * connection stays bound to the transaction until seaf_db_trans_close.
 * Returns NULL on failure (the connection is closed, not recycled). */
SeafDBTrans *
seaf_db_begin_transaction (SeafDB *db)
{
SeafDBTrans *trans = NULL;
DBConnection *conn = db_ops.get_connection(db);
if (!conn) {
return trans;
}
if (db_ops.execute_sql_no_stmt (conn, "BEGIN", NULL) < 0) {
db_ops.release_connection (conn, TRUE);
return trans;
}
trans = g_new0 (SeafDBTrans, 1);
trans->conn = conn;
return trans;
}
/* End a transaction: release its connection (closing it if any trans
 * operation failed, per need_close) and free the handle. */
void
seaf_db_trans_close (SeafDBTrans *trans)
{
db_ops.release_connection (trans->conn, trans->need_close);
g_free (trans);
}
/* Commit a transaction. On failure the connection is marked for close
 * so it won't be recycled with a dangling transaction. Returns 0/-1. */
int
seaf_db_commit (SeafDBTrans *trans)
{
DBConnection *conn = trans->conn;
if (db_ops.execute_sql_no_stmt (conn, "COMMIT", NULL) < 0) {
trans->need_close = TRUE;
return -1;
}
return 0;
}
/* Roll back a transaction. On failure the connection is marked for
 * close so it won't be recycled in an unknown state. Returns 0/-1. */
int
seaf_db_rollback (SeafDBTrans *trans)
{
DBConnection *conn = trans->conn;
if (db_ops.execute_sql_no_stmt (conn, "ROLLBACK", NULL) < 0) {
trans->need_close = TRUE;
return -1;
}
return 0;
}
/* Execute a parameterized statement on the transaction's connection.
 * No retry here: a lost connection would lose the transaction anyway,
 * so failure just marks the connection for close. Returns the
 * backend's result; -1 on error. */
int
seaf_db_trans_query (SeafDBTrans *trans, const char *sql, int n, ...)
{
int ret;
va_list args;
va_start (args, n);
ret = db_ops.execute_sql (trans->conn, sql, n, args, NULL);
va_end (args);
if (ret < 0)
trans->need_close = TRUE;
return ret;
}
/* Within a transaction: does the parameterized query return any rows?
 * Sets *db_err on failure and marks the connection for close. */
gboolean
seaf_db_trans_check_for_existence (SeafDBTrans *trans,
const char *sql,
gboolean *db_err,
int n, ...)
{
int n_rows;
va_list args;
va_start (args, n);
n_rows = db_ops.query_foreach_row (trans->conn, sql, NULL, NULL, n, args, NULL);
va_end (args);
if (n_rows < 0) {
trans->need_close = TRUE;
*db_err = TRUE;
return FALSE;
} else {
*db_err = FALSE;
return (n_rows != 0);
}
}
/* Within a transaction: run a parameterized query and invoke callback
 * per row. On failure the connection is marked for close. Returns the
 * row count, or -1 on error. */
int
seaf_db_trans_foreach_selected_row (SeafDBTrans *trans, const char *sql,
SeafDBRowFunc callback, void *data,
int n, ...)
{
int ret;
va_list args;
va_start (args, n);
ret = db_ops.query_foreach_row (trans->conn, sql, callback, data, n, args, NULL);
va_end (args);
if (ret < 0)
trans->need_close = TRUE;
return ret;
}
/* Number of columns in a result row. */
int
seaf_db_row_get_column_count (SeafDBRow *row)
{
return db_ops.row_get_column_count(row);
}
#ifdef HAVE_MYSQL
/* MySQL DB */
/* MySQL-specific SeafDB: embeds the base struct plus all connection
 * parameters, so each new connection can be opened from them. */
typedef struct MySQLDB {
struct SeafDB parent;
char *host;
char *user;
char *password;
unsigned int port;
char *db_name;
char *unix_socket;
gboolean use_ssl;
gboolean skip_verify;
char *ca_path;
char *charset;
} MySQLDB;
/* MySQL-specific connection: base struct first so DBConnection* casts
 * are valid. */
typedef struct MySQLDBConnection {
struct DBConnection parent;
MYSQL *db_conn;
} MySQLDBConnection;
/* TRUE if the underlying MySQL connection still answers a ping. */
static gboolean
mysql_db_connection_ping (DBConnection *vconn)
{
    MYSQL *mysql = ((MySQLDBConnection *)vconn)->db_conn;

    return mysql_ping (mysql) == 0 ? TRUE : FALSE;
}
/* Allocate a MySQLDB handle and copy all connection parameters into it.
 * Also performs one-time client library initialization. No connection
 * is opened here. */
static SeafDB *
mysql_db_new (const char *host,
              int port,
              const char *user,
              const char *password,
              const char *db_name,
              const char *unix_socket,
              gboolean use_ssl,
              gboolean skip_verify,
              const char *ca_path,
              const char *charset)
{
    MySQLDB *mdb = g_new0 (MySQLDB, 1);

    mdb->host        = g_strdup (host);
    mdb->port        = port;
    mdb->user        = g_strdup (user);
    mdb->password    = g_strdup (password);
    mdb->db_name     = g_strdup (db_name);
    mdb->unix_socket = g_strdup (unix_socket);
    mdb->use_ssl     = use_ssl;
    mdb->skip_verify = skip_verify;
    mdb->ca_path     = g_strdup (ca_path);
    mdb->charset     = g_strdup (charset);

    mysql_library_init (0, NULL, NULL);

    return (SeafDB *)mdb;
}
/* my_bool was removed from MySQL 8's client headers; define it for the
 * MariaDB option calls below. */
typedef char my_bool;
/* Open a new MySQL connection using the parameters stored in the
 * MySQLDB handle, applying SSL, charset, socket and timeout options.
 * Returns NULL (after logging) when the connect fails. */
static DBConnection *
mysql_db_get_connection (SeafDB *vdb)
{
MySQLDB *db = (MySQLDB *)vdb;
int conn_timeout = 1;
int read_write_timeout = 60;
MYSQL *db_conn;
MySQLDBConnection *conn = NULL;
int ssl_mode;
db_conn = mysql_init (NULL);
if (!db_conn) {
seaf_warning ("Failed to init mysql connection object.\n");
return NULL;
}
if (db->use_ssl && !db->skip_verify) {
#ifndef LIBMARIADB
// Set ssl_mode to SSL_MODE_VERIFY_IDENTITY to verify server cert.
// When ssl_mode is set to SSL_MODE_VERIFY_IDENTITY, MYSQL_OPT_SSL_CA is required to verify server cert.
// Refer to: https://dev.mysql.com/doc/c-api/5.7/en/mysql-options.html
ssl_mode = SSL_MODE_VERIFY_IDENTITY;
mysql_options(db_conn, MYSQL_OPT_SSL_MODE, &ssl_mode);
mysql_options(db_conn, MYSQL_OPT_SSL_CA, db->ca_path);
#else
static my_bool verify= 1;
mysql_optionsv(db_conn, MYSQL_OPT_SSL_VERIFY_SERVER_CERT, (void *)&verify);
mysql_options(db_conn, MYSQL_OPT_SSL_CA, db->ca_path);
#endif
} else if (db->use_ssl && db->skip_verify) {
#ifndef LIBMARIADB
// Set ssl_mode to SSL_MODE_PREFERRED to skip verify server cert.
ssl_mode = SSL_MODE_PREFERRED;
mysql_options(db_conn, MYSQL_OPT_SSL_MODE, &ssl_mode);
#else
static my_bool verify= 0;
mysql_optionsv(db_conn, MYSQL_OPT_SSL_VERIFY_SERVER_CERT, (void *)&verify);
#endif
} else {
#ifdef LIBMARIADB
/* MariaDB: explicitly disable server-cert verification when SSL is
 * not requested. */
static my_bool verify= 0;
mysql_optionsv(db_conn, MYSQL_OPT_SSL_VERIFY_SERVER_CERT, (void *)&verify);
#endif
}
if (db->charset)
mysql_options(db_conn, MYSQL_SET_CHARSET_NAME, db->charset);
/* Unix-socket transport; without a user name, fall back to the
 * socket's auth plugin (MySQL) or the socket path option (MariaDB). */
if (db->unix_socket) {
int pro_type = MYSQL_PROTOCOL_SOCKET;
mysql_options (db_conn, MYSQL_OPT_PROTOCOL, &pro_type);
if (!db->user) {
#ifndef LIBMARIADB
mysql_options (db_conn, MYSQL_DEFAULT_AUTH, "unix_socket");
#else
mysql_options (db_conn, MARIADB_OPT_UNIXSOCKET, (void *)db->unix_socket);
#endif
}
}
/* Short connect timeout (1s), longer read/write timeouts (60s). */
mysql_options(db_conn, MYSQL_OPT_CONNECT_TIMEOUT, (const char*)&conn_timeout);
mysql_options(db_conn, MYSQL_OPT_READ_TIMEOUT, (const char*)&read_write_timeout);
mysql_options(db_conn, MYSQL_OPT_WRITE_TIMEOUT, (const char*)&read_write_timeout);
if (!mysql_real_connect(db_conn, db->host, db->user, db->password,
db->db_name, db->port,
db->unix_socket, CLIENT_MULTI_STATEMENTS)) {
seaf_warning ("Failed to connect to MySQL: %s\n", mysql_error(db_conn));
mysql_close (db_conn);
return NULL;
}
conn = g_new0 (MySQLDBConnection, 1);
conn->db_conn = db_conn;
return (DBConnection *)conn;
}
/* Close the underlying MySQL connection and free the wrapper.
 * Passing NULL is a no-op. */
static void
mysql_db_release_connection (DBConnection *vconn)
{
    MySQLDBConnection *connection = (MySQLDBConnection *)vconn;

    if (connection == NULL)
        return;

    mysql_close (connection->db_conn);
    g_free (connection);
}
/* Execute `sql` directly (no prepared statement).  Returns 0 on success,
 * -1 on error.  On a lost server connection *retry is set so the caller
 * can reconnect and run the statement again.
 */
static int
mysql_db_execute_sql_no_stmt (DBConnection *vconn, const char *sql, gboolean *retry)
{
    MySQLDBConnection *conn = (MySQLDBConnection *)vconn;

    if (mysql_query (conn->db_conn, sql) == 0)
        return 0;

    /* mysql_query() only reports success/failure; the actual error code
     * must be fetched with mysql_errno().  The previous code compared the
     * return value itself against CR_SERVER_GONE_ERROR/CR_SERVER_LOST,
     * which never matched, so lost connections were never retried here. */
    unsigned int err_code = mysql_errno (conn->db_conn);
    if (err_code == CR_SERVER_GONE_ERROR || err_code == CR_SERVER_LOST) {
        if (retry)
            *retry = TRUE;
    }

    seaf_warning ("Failed to execute sql %s: %s\n", sql, mysql_error(conn->db_conn));
    return -1;
}
/* Allocate and prepare a MySQL statement for `sql`.  Returns NULL on
 * failure (a warning is logged); *retry is set when the server connection
 * was lost so the caller may reconnect and try again. */
static MYSQL_STMT *
_prepare_stmt_mysql (MYSQL *db, const char *sql, gboolean *retry)
{
    MYSQL_STMT *stmt = mysql_stmt_init (db);

    if (stmt == NULL) {
        seaf_warning ("mysql_stmt_init failed.\n");
        return NULL;
    }

    if (mysql_stmt_prepare (stmt, sql, strlen(sql)) == 0)
        return stmt;

    int err_code = mysql_stmt_errno (stmt);
    if ((err_code == CR_SERVER_GONE_ERROR || err_code == CR_SERVER_LOST) && retry)
        *retry = TRUE;

    seaf_warning ("Failed to prepare sql %s: %s\n", sql, mysql_stmt_error(stmt));
    mysql_stmt_close (stmt);
    return NULL;
}
/* Fill `params` (an array of `n` MYSQL_BIND) from the varargs list and bind
 * them to `stmt`.  Each parameter is described by a type string ("int",
 * "int64" or "string") followed by its value.
 *
 * Ownership: the buffer and length fields of each MYSQL_BIND are
 * heap-allocated here; the CALLER must free them after executing the
 * statement (see mysql_db_execute_sql / mysql_db_query_foreach_row).
 *
 * Returns 0 on success, -1 on bind failure or unknown type string.
 */
static int
_bind_params_mysql (MYSQL_STMT *stmt, MYSQL_BIND *params, int n, va_list args)
{
    int i;
    const char *type;

    for (i = 0; i < n; ++i) {
        type = va_arg (args, const char *);
        if (strcmp(type, "int") == 0) {
            int x = va_arg (args, int);
            int *pval = g_new (int, 1);
            *pval = x;
            params[i].buffer_type = MYSQL_TYPE_LONG;
            params[i].buffer = pval;
            params[i].is_null = 0;
        } else if (strcmp (type, "int64") == 0) {
            gint64 x = va_arg (args, gint64);
            gint64 *pval = g_new (gint64, 1);
            *pval = x;
            params[i].buffer_type = MYSQL_TYPE_LONGLONG;
            params[i].buffer = pval;
            params[i].is_null = 0;
        } else if (strcmp (type, "string") == 0) {
            const char *s = va_arg (args, const char *);
            /* `yes` must be static: MySQL dereferences the is_null pointer
             * at execute time, after this function has returned. */
            static my_bool yes = TRUE;
            params[i].buffer_type = MYSQL_TYPE_STRING;
            params[i].buffer = g_strdup(s);   /* g_strdup(NULL) yields NULL */
            unsigned long *plen = g_new (unsigned long, 1);
            params[i].length = plen;
            if (!s) {
                /* NULL string binds as SQL NULL. */
                *plen = 0;
                params[i].buffer_length = 0;
                params[i].is_null = &yes;
            } else {
                *plen = strlen(s);
                params[i].buffer_length = *plen + 1;
                params[i].is_null = 0;
            }
        } else {
            seaf_warning ("BUG: invalid prep stmt parameter type %s.\n", type);
            g_return_val_if_reached (-1);
        }
    }

    if (mysql_stmt_bind_param (stmt, params) != 0) {
        return -1;
    }

    return 0;
}
/* Execute a non-SELECT statement with `n` bound parameters via a prepared
 * statement.  Parameter types/values are consumed from `args` (see
 * _bind_params_mysql).  Returns 0 on success, -1 on error; on a lost server
 * connection *retry is set so the caller can reconnect and retry.
 */
static int
mysql_db_execute_sql (DBConnection *vconn, const char *sql, int n, va_list args, gboolean *retry)
{
    MySQLDBConnection *conn = (MySQLDBConnection *)vconn;
    MYSQL *db = conn->db_conn;
    MYSQL_STMT *stmt = NULL;
    MYSQL_BIND *params = NULL;
    int ret = 0;

    stmt = _prepare_stmt_mysql (db, sql, retry);
    if (!stmt) {
        return -1;
    }

    if (n > 0) {
        params = g_new0 (MYSQL_BIND, n);
        if (_bind_params_mysql (stmt, params, n, args) < 0) {
            seaf_warning ("Failed to bind parameters for %s: %s.\n",
                          sql, mysql_stmt_error(stmt));
            ret = -1;
            goto out;
        }
    }

    if (mysql_stmt_execute (stmt) != 0) {
        seaf_warning ("Failed to execute sql %s: %s\n", sql, mysql_stmt_error(stmt));
        ret = -1;
        goto out;
    }

out:
    /* The error code must be read before mysql_stmt_close() invalidates
     * the statement handle. */
    if (ret < 0) {
        int err_code = mysql_stmt_errno (stmt);
        if (err_code == CR_SERVER_GONE_ERROR || err_code == CR_SERVER_LOST) {
            if (retry)
                *retry = TRUE;
        }
    }

    if (stmt)
        mysql_stmt_close (stmt);
    if (params) {
        int i;
        /* _bind_params_mysql heap-allocates each buffer and length. */
        for (i = 0; i < n; ++i) {
            g_free (params[i].buffer);
            g_free (params[i].length);
        }
        g_free (params);
    }

    return ret;
}
/* One result row of a MySQL prepared-statement query, passed to the
 * SeafDBRowFunc callback while iterating. */
typedef struct MySQLDBRow {
    SeafDBRow parent;
    int column_count;       /* number of columns in the result set */
    MYSQL_STMT *stmt;       /* the executing statement (not owned) */
    MYSQL_BIND *results;    /* fixed-size per-column output buffers */
    /* Used when returned columns are truncated. */
    MYSQL_BIND *new_binds;
} MySQLDBRow;
#define DEFAULT_MYSQL_COLUMN_SIZE 1024
/* Execute a SELECT-style prepared statement and invoke `callback` on each
 * returned row.  All columns are fetched as strings into fixed-size buffers
 * (DEFAULT_MYSQL_COLUMN_SIZE); columns longer than that are re-fetched on
 * demand into `new_binds` by mysql_db_row_get_column_string().
 *
 * Returns the number of rows fetched, or -1 on error.  On a lost server
 * connection before fetching started, *retry is set so the caller can
 * reconnect and retry.  Iteration stops early if the callback returns FALSE.
 */
static int
mysql_db_query_foreach_row (DBConnection *vconn, const char *sql,
                            SeafDBRowFunc callback, void *data,
                            int n, va_list args, gboolean *retry)
{
    MySQLDBConnection *conn = (MySQLDBConnection *)vconn;
    MYSQL *db = conn->db_conn;
    MYSQL_STMT *stmt = NULL;
    MYSQL_BIND *params = NULL;
    MySQLDBRow row;
    int err_code;
    int nrows = 0;
    int i;

    memset (&row, 0, sizeof(row));

    stmt = _prepare_stmt_mysql (db, sql, retry);
    if (!stmt) {
        return -1;
    }

    if (n > 0) {
        params = g_new0 (MYSQL_BIND, n);
        if (_bind_params_mysql (stmt, params, n, args) < 0) {
            nrows = -1;
            err_code = mysql_stmt_errno (stmt);
            if (err_code == CR_SERVER_GONE_ERROR || err_code == CR_SERVER_LOST) {
                if (retry)
                    *retry = TRUE;
            }
            goto out;
        }
    }

    if (mysql_stmt_execute (stmt) != 0) {
        seaf_warning ("Failed to execute sql %s: %s\n", sql, mysql_stmt_error(stmt));
        nrows = -1;
        err_code = mysql_stmt_errno (stmt);
        if (err_code == CR_SERVER_GONE_ERROR || err_code == CR_SERVER_LOST) {
            if (retry)
                *retry = TRUE;
        }
        goto out;
    }

    /* Set up one output buffer per column. */
    row.column_count = mysql_stmt_field_count (stmt);
    row.stmt = stmt;
    row.results = g_new0 (MYSQL_BIND, row.column_count);
    for (i = 0; i < row.column_count; ++i) {
        /* +1 byte so a full-size column can still be NUL-terminated. */
        row.results[i].buffer = g_malloc (DEFAULT_MYSQL_COLUMN_SIZE + 1);
        /* Ask MySQL to convert fields to string, to avoid the trouble of
         * checking field types.
         */
        row.results[i].buffer_type = MYSQL_TYPE_STRING;
        row.results[i].buffer_length = DEFAULT_MYSQL_COLUMN_SIZE;
        row.results[i].length = g_new0 (unsigned long, 1);
        row.results[i].is_null = g_new0 (my_bool, 1);
    }
    /* Scratch binds for re-fetching truncated (oversized) columns. */
    row.new_binds = g_new0 (MYSQL_BIND, row.column_count);

    if (mysql_stmt_bind_result (stmt, row.results) != 0) {
        seaf_warning ("Failed to bind result for sql %s: %s\n", sql, mysql_stmt_error(stmt));
        nrows = -1;
        err_code = mysql_stmt_errno (stmt);
        if (err_code == CR_SERVER_GONE_ERROR || err_code == CR_SERVER_LOST) {
            if (retry)
                *retry = TRUE;
        }
        goto out;
    }

    int rc;
    gboolean next_row = TRUE;
    while (1) {
        rc = mysql_stmt_fetch (stmt);
        if (rc == 1) {
            seaf_warning ("Failed to fetch result for sql %s: %s\n",
                          sql, mysql_stmt_error(stmt));
            nrows = -1;
            // Don't need to retry, some rows may have been fetched.
            goto out;
        }
        if (rc == MYSQL_NO_DATA)
            break;

        /* rc == 0 or rc == MYSQL_DATA_TRUNCATED */
        ++nrows;
        if (callback)
            next_row = callback ((SeafDBRow *)&row, data);

        /* Release any oversized-column buffers the callback caused to be
         * allocated for this row (see mysql_db_row_get_column_string). */
        for (i = 0; i < row.column_count; ++i) {
            g_free (row.new_binds[i].buffer);
            g_free (row.new_binds[i].length);
            g_free (row.new_binds[i].is_null);
            memset (&row.new_binds[i], 0, sizeof(MYSQL_BIND));
        }

        if (!next_row)
            break;
    }

out:
    if (stmt) {
        mysql_stmt_free_result (stmt);
        mysql_stmt_close (stmt);
    }
    if (params) {
        for (i = 0; i < n; ++i) {
            g_free (params[i].buffer);
            g_free (params[i].length);
        }
        g_free (params);
    }
    if (row.results) {
        for (i = 0; i < row.column_count; ++i) {
            g_free (row.results[i].buffer);
            g_free (row.results[i].length);
            g_free (row.results[i].is_null);
        }
        g_free (row.results);
    }
    if (row.new_binds) {
        for (i = 0; i < row.column_count; ++i) {
            g_free (row.new_binds[i].buffer);
            g_free (row.new_binds[i].length);
            g_free (row.new_binds[i].is_null);
        }
        g_free (row.new_binds);
    }

    return nrows;
}
/* Number of columns in the current result row. */
static int
mysql_db_row_get_column_count (SeafDBRow *vrow)
{
    return ((MySQLDBRow *)vrow)->column_count;
}
/* Return column `i` of the current row as a NUL-terminated string, or NULL
 * for SQL NULL (or on fetch error).  The returned pointer is owned by the
 * row and is only valid until the next row is fetched.
 */
static const char *
mysql_db_row_get_column_string (SeafDBRow *vrow, int i)
{
    MySQLDBRow *row = (MySQLDBRow *)vrow;

    if (*(row->results[i].is_null)) {
        return NULL;
    }

    char *ret = NULL;
    unsigned long real_length = *(row->results[i].length);
    /* If column size is larger then allocated buffer size, re-allocate a new buffer
     * and fetch the column directly.
     */
    if (real_length > row->results[i].buffer_length) {
        row->new_binds[i].buffer = g_malloc (real_length + 1);
        row->new_binds[i].buffer_type = MYSQL_TYPE_STRING;
        row->new_binds[i].buffer_length = real_length;
        row->new_binds[i].length = g_new0 (unsigned long, 1);
        row->new_binds[i].is_null = g_new0 (my_bool, 1);

        if (mysql_stmt_fetch_column (row->stmt, &row->new_binds[i], i, 0) != 0) {
            /* Fixed log message typo: was "Faield". */
            seaf_warning ("Failed to fetch column: %s\n", mysql_stmt_error(row->stmt));
            return NULL;
        }

        ret = row->new_binds[i].buffer;
    } else {
        ret = row->results[i].buffer;
    }
    /* Both buffers are allocated with one extra byte for the terminator. */
    ret[real_length] = 0;

    return ret;
}
/* Return column `idx` of the current row as an int.  SQL NULL yields 0;
 * an unparsable value yields -1 (with a warning). */
static int
mysql_db_row_get_column_int (SeafDBRow *vrow, int idx)
{
    char *endp;
    const char *text = mysql_db_row_get_column_string (vrow, idx);

    if (text == NULL)
        return 0;

    errno = 0;
    long value = strtol (text, &endp, 10);
    if (errno != 0 || endp == text) {
        seaf_warning ("Number conversion failed.\n");
        return -1;
    }

    return value;
}
/* Return column `idx` of the current row as a 64-bit int.  SQL NULL yields
 * 0; an unparsable value yields -1 (with a warning). */
static gint64
mysql_db_row_get_column_int64 (SeafDBRow *vrow, int idx)
{
    char *endp;
    const char *text = mysql_db_row_get_column_string (vrow, idx);

    if (text == NULL)
        return 0;

    errno = 0;
    gint64 value = strtoll (text, &endp, 10);
    if (errno != 0 || endp == text) {
        seaf_warning ("Number conversion failed.\n");
        return -1;
    }

    return value;
}
#endif /* HAVE_MYSQL */
/* SQLite DB */
/* SQLite thread synchronization rountines.
* See https://www.sqlite.org/unlock_notify.html
*/
/* Per-waiter state for SQLite's unlock-notify mechanism: `fired` is set by
 * unlock_notify_cb() under `mutex` and signalled via `cond`. */
typedef struct UnlockNotification {
    int fired;               /* nonzero once the unlock callback has run */
    pthread_cond_t cond;
    pthread_mutex_t mutex;
} UnlockNotification;
/* SQLite unlock-notify callback: wake every waiter registered for this
 * unlock event by setting its `fired` flag and signalling its condvar. */
static void
unlock_notify_cb(void **ap_arg, int n_arg)
{
    int idx;

    for (idx = 0; idx < n_arg; idx++) {
        UnlockNotification *un = ap_arg[idx];

        pthread_mutex_lock (&un->mutex);
        un->fired = 1;
        pthread_cond_signal (&un->cond);
        pthread_mutex_unlock (&un->mutex);
    }
}
/* Block until another connection releases the lock that made an operation
 * on `db` return SQLITE_LOCKED.  Returns the sqlite3_unlock_notify() result
 * (SQLITE_OK normally, SQLITE_LOCKED on a deadlock).
 */
static int
wait_for_unlock_notify(sqlite3 *db)
{
    UnlockNotification un;
    un.fired = 0;
    pthread_mutex_init (&un.mutex, NULL);
    pthread_cond_init (&un.cond, NULL);

    int rc = sqlite3_unlock_notify(db, unlock_notify_cb, (void *)&un);

    if (rc == SQLITE_OK) {
        pthread_mutex_lock(&un.mutex);
        /* pthread_cond_wait() may wake spuriously; POSIX requires
         * re-checking the predicate in a loop (the old code used `if`). */
        while (!un.fired)
            pthread_cond_wait (&un.cond, &un.mutex);
        pthread_mutex_unlock(&un.mutex);
    }

    pthread_cond_destroy (&un.cond);
    pthread_mutex_destroy (&un.mutex);

    return rc;
}
/* sqlite3_step() wrapper that blocks on SQLITE_LOCKED using the
 * unlock-notify mechanism, resetting and retrying the statement until it
 * completes or waiting fails (deadlock). */
static int
sqlite3_blocking_step(sqlite3_stmt *stmt)
{
    int rc;

    for (;;) {
        rc = sqlite3_step (stmt);
        if (rc != SQLITE_LOCKED)
            break;
        rc = wait_for_unlock_notify (sqlite3_db_handle (stmt));
        if (rc != SQLITE_OK)
            break;
        sqlite3_reset (stmt);
    }

    return rc;
}
/* sqlite3_prepare_v2() wrapper that blocks and retries on SQLITE_LOCKED
 * via the unlock-notify mechanism. */
static int
sqlite3_blocking_prepare_v2(sqlite3 *db, const char *sql, int sql_len, sqlite3_stmt **pstmt, const char **pz)
{
    int rc;

    for (;;) {
        rc = sqlite3_prepare_v2 (db, sql, sql_len, pstmt, pz);
        if (rc != SQLITE_LOCKED)
            break;
        rc = wait_for_unlock_notify (db);
        if (rc != SQLITE_OK)
            break;
    }

    return rc;
}
/* sqlite3_exec() wrapper that blocks and retries on SQLITE_LOCKED via the
 * unlock-notify mechanism. */
static int
sqlite3_blocking_exec(sqlite3 *db, const char *sql, int (*callback)(void *, int, char **, char **), void *arg, char **errmsg)
{
    int rc;

    for (;;) {
        rc = sqlite3_exec (db, sql, callback, arg, errmsg);
        if (rc != SQLITE_LOCKED)
            break;
        rc = wait_for_unlock_notify (db);
        if (rc != SQLITE_OK)
            break;
    }

    return rc;
}
/* SQLite backend state: just the database file path; a fresh connection is
 * opened per sqlite_db_get_connection() call. */
typedef struct SQLiteDB {
    SeafDB parent;
    char *db_path;       /* owned copy of the database file path */
} SQLiteDB;

/* One open SQLite connection handle. */
typedef struct SQLiteDBConnection {
    DBConnection parent;
    sqlite3 *db_conn;
} SQLiteDBConnection;
/* Create a SQLite SeafDB backend bound to the database file `db_path`.
 * The path is copied; connections are opened lazily per operation. */
static SeafDB *
sqlite_db_new (const char *db_path)
{
    SQLiteDB *sqlite_db = g_new0 (SQLiteDB, 1);

    sqlite_db->db_path = g_strdup (db_path);
    return (SeafDB *)sqlite_db;
}
/* Open a new connection to the SQLite database (created if missing,
 * shared-cache mode).  Returns NULL on failure, logging a warning.
 */
static DBConnection *
sqlite_db_get_connection (SeafDB *vdb)
{
    SQLiteDB *db = (SQLiteDB *)vdb;
    sqlite3 *db_conn;
    int result;
    const char *errmsg;
    SQLiteDBConnection *conn;

    result = sqlite3_open_v2 (db->db_path, &db_conn, SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_SHAREDCACHE, NULL);
    if (result != SQLITE_OK) {
        errmsg = sqlite3_errmsg(db_conn);
        seaf_warning ("Failed to open sqlite db: %s\n", errmsg ? errmsg : "no error given");
        /* sqlite3_open_v2() usually returns a handle even on failure; it
         * must still be passed to sqlite3_close() or it is leaked. */
        sqlite3_close (db_conn);
        return NULL;
    }

    conn = g_new0 (SQLiteDBConnection, 1);
    conn->db_conn = db_conn;

    return (DBConnection *)conn;
}
/* Close a SQLite connection and free the wrapper.  NULL is a no-op.
 * `need_close` is unused by this backend (kept for interface parity). */
static void
sqlite_db_release_connection (DBConnection *vconn, gboolean need_close)
{
    SQLiteDBConnection *connection = (SQLiteDBConnection *)vconn;

    if (connection == NULL)
        return;

    sqlite3_close (connection->db_conn);
    g_free (connection);
}
/* Execute `sql` directly (no prepared statement).  Returns 0 on success,
 * -1 on error.  `retry` is unused for SQLite (locking is handled by the
 * blocking wrappers).
 */
static int
sqlite_db_execute_sql_no_stmt (DBConnection *vconn, const char *sql, gboolean *retry)
{
    SQLiteDBConnection *conn = (SQLiteDBConnection *)vconn;
    char *errmsg = NULL;
    int rc;

    rc = sqlite3_blocking_exec (conn->db_conn, sql, NULL, NULL, &errmsg);
    if (rc != SQLITE_OK) {
        /* Added the trailing '\n' the other log messages in this file have. */
        seaf_warning ("sqlite3_exec failed %s: %s\n", sql, errmsg ? errmsg : "no error given");
        if (errmsg)
            sqlite3_free (errmsg);
        return -1;
    }

    return 0;
}
/* Bind `n` parameters from `args` to `stmt`.  Each parameter is described
 * by a type string ("int", "int64" or "string") followed by its value.
 * Returns 0 on success, -1 on error (a warning is logged). */
static int
_bind_parameters_sqlite (sqlite3 *db, sqlite3_stmt *stmt, int n, va_list args)
{
    int idx;

    /* SQLite parameter indices are 1-based. */
    for (idx = 1; idx <= n; ++idx) {
        const char *type = va_arg (args, const char *);
        int rc;

        if (strcmp (type, "int") == 0) {
            rc = sqlite3_bind_int (stmt, idx, va_arg (args, int));
            if (rc != SQLITE_OK) {
                seaf_warning ("sqlite3_bind_int failed: %s\n", sqlite3_errmsg(db));
                return -1;
            }
        } else if (strcmp (type, "int64") == 0) {
            rc = sqlite3_bind_int64 (stmt, idx, va_arg (args, gint64));
            if (rc != SQLITE_OK) {
                seaf_warning ("sqlite3_bind_int64 failed: %s\n", sqlite3_errmsg(db));
                return -1;
            }
        } else if (strcmp (type, "string") == 0) {
            const char *text = va_arg (args, const char *);
            rc = sqlite3_bind_text (stmt, idx, text, -1, SQLITE_TRANSIENT);
            if (rc != SQLITE_OK) {
                seaf_warning ("sqlite3_bind_text failed: %s\n", sqlite3_errmsg(db));
                return -1;
            }
        } else {
            seaf_warning ("BUG: invalid prep stmt parameter type %s.\n", type);
            g_return_val_if_reached (-1);
        }
    }

    return 0;
}
/* Execute a non-SELECT statement with `n` bound parameters.  Returns 0 on
 * success, -1 on error.  `retry` is unused for SQLite.
 */
static int
sqlite_db_execute_sql (DBConnection *vconn, const char *sql, int n, va_list args, gboolean *retry)
{
    SQLiteDBConnection *conn = (SQLiteDBConnection *)vconn;
    sqlite3 *db = conn->db_conn;
    sqlite3_stmt *stmt;
    int rc;
    int ret = 0;

    rc = sqlite3_blocking_prepare_v2 (db, sql, -1, &stmt, NULL);
    if (rc != SQLITE_OK) {
        /* Added trailing '\n' for consistency with other log messages. */
        seaf_warning ("sqlite3_prepare_v2 failed %s: %s\n", sql, sqlite3_errmsg(db));
        return -1;
    }

    if (_bind_parameters_sqlite (db, stmt, n, args) < 0) {
        seaf_warning ("Failed to bind parameters for sql %s\n", sql);
        ret = -1;
        goto out;
    }

    rc = sqlite3_blocking_step (stmt);
    if (rc != SQLITE_DONE) {
        seaf_warning ("sqlite3_step failed %s: %s\n", sql, sqlite3_errmsg(db));
        ret = -1;
        goto out;
    }

out:
    sqlite3_finalize (stmt);
    return ret;
}
/* One result row of a SQLite query; column values are read directly from
 * the statement via the sqlite3_column_* functions. */
typedef struct SQLiteDBRow {
    SeafDBRow parent;
    int column_count;    /* number of columns in the result set */
    sqlite3 *db;         /* owning connection (not owned by the row) */
    sqlite3_stmt *stmt;  /* the executing statement (not owned) */
} SQLiteDBRow;
/* Run a SELECT-style prepared statement and invoke `callback` on each
 * returned row.  Returns the number of rows fetched, or -1 on error.
 * Iteration stops early if the callback returns FALSE.  `retry` is unused
 * for SQLite. */
static int
sqlite_db_query_foreach_row (DBConnection *vconn, const char *sql,
                             SeafDBRowFunc callback, void *data,
                             int n, va_list args, gboolean *retry)
{
    SQLiteDBConnection *conn = (SQLiteDBConnection *)vconn;
    sqlite3 *db = conn->db_conn;
    sqlite3_stmt *stmt;
    int rc;
    int n_fetched = 0;

    rc = sqlite3_blocking_prepare_v2 (db, sql, -1, &stmt, NULL);
    if (rc != SQLITE_OK) {
        seaf_warning ("sqlite3_prepare_v2 failed %s: %s", sql, sqlite3_errmsg(db));
        return -1;
    }

    if (_bind_parameters_sqlite (db, stmt, n, args) < 0) {
        seaf_warning ("Failed to bind parameters for sql %s\n", sql);
        n_fetched = -1;
        goto out;
    }

    SQLiteDBRow row = { .db = db, .stmt = stmt,
                        .column_count = sqlite3_column_count (stmt) };

    gboolean stop = FALSE;
    while (!stop) {
        rc = sqlite3_blocking_step (stmt);
        if (rc == SQLITE_DONE)
            break;
        if (rc != SQLITE_ROW) {
            seaf_warning ("sqlite3_step failed %s: %s\n", sql, sqlite3_errmsg(db));
            n_fetched = -1;
            goto out;
        }
        ++n_fetched;
        if (callback)
            stop = !callback ((SeafDBRow *)&row, data);
    }

out:
    sqlite3_finalize (stmt);
    return n_fetched;
}
/* Number of columns in the current result row. */
static int
sqlite_db_row_get_column_count (SeafDBRow *vrow)
{
    return ((SQLiteDBRow *)vrow)->column_count;
}
/* Return column `idx` as text; NULL for SQL NULL.  The pointer is owned by
 * SQLite and only valid until the next step/finalize. */
static const char *
sqlite_db_row_get_column_string (SeafDBRow *vrow, int idx)
{
    SQLiteDBRow *r = (SQLiteDBRow *)vrow;

    return (const char *)sqlite3_column_text (r->stmt, idx);
}
/* Return column `idx` as an int (SQLite performs the conversion). */
static int
sqlite_db_row_get_column_int (SeafDBRow *vrow, int idx)
{
    SQLiteDBRow *r = (SQLiteDBRow *)vrow;

    return sqlite3_column_int (r->stmt, idx);
}
/* Return column `idx` as a 64-bit int (SQLite performs the conversion). */
static gint64
sqlite_db_row_get_column_int64 (SeafDBRow *vrow, int idx)
{
    SQLiteDBRow *r = (SQLiteDBRow *)vrow;

    return sqlite3_column_int64 (r->stmt, idx);
}
================================================
FILE: common/seaf-db.h
================================================
#ifndef SEAF_DB_H
#define SEAF_DB_H

/* Database abstraction layer shared by the seafile and ccnet databases.
 * Backends: SQLite, MySQL and (currently disabled) PostgreSQL. */
enum {
    SEAF_DB_TYPE_SQLITE,
    SEAF_DB_TYPE_MYSQL,
    SEAF_DB_TYPE_PGSQL,
};

/* Opaque handle types; the Ccnet* names are aliases kept for historical
 * reasons — both databases use the same implementation. */
typedef struct SeafDB SeafDB;
typedef struct SeafDB CcnetDB;
typedef struct SeafDBRow SeafDBRow;
typedef struct SeafDBRow CcnetDBRow;
typedef struct SeafDBTrans SeafDBTrans;
typedef struct SeafDBTrans CcnetDBTrans;

/* Row callback for the foreach/statement_foreach functions: return TRUE to
 * continue iterating, FALSE to stop early. */
typedef gboolean (*SeafDBRowFunc) (SeafDBRow *, void *);
typedef gboolean (*CcnetDBRowFunc) (CcnetDBRow *, void *);

/* Create a MySQL-backed SeafDB.  `unix_socket` may be NULL to use TCP;
 * `ca_path` is required when use_ssl is set and skip_verify is not. */
SeafDB *
seaf_db_new_mysql (const char *host,
                   int port,
                   const char *user,
                   const char *passwd,
                   const char *db,
                   const char *unix_socket,
                   gboolean use_ssl,
                   gboolean skip_verify,
                   const char *ca_path,
                   const char *charset,
                   int max_connections);

#if 0
SeafDB *
seaf_db_new_pgsql (const char *host,
                   unsigned int port,
                   const char *user,
                   const char *passwd,
                   const char *db_name,
                   const char *unix_socket,
                   int max_connections);
#endif

/* Create a SQLite-backed SeafDB for the database file at `db_path`. */
SeafDB *
seaf_db_new_sqlite (const char *db_path, int max_connections);

/* Returns one of the SEAF_DB_TYPE_* values. */
int
seaf_db_type (SeafDB *db);

/* Plain-SQL helpers (no bound parameters). */
int
seaf_db_query (SeafDB *db, const char *sql);
gboolean
seaf_db_check_for_existence (SeafDB *db, const char *sql, gboolean *db_err);
int
seaf_db_foreach_selected_row (SeafDB *db, const char *sql,
                              SeafDBRowFunc callback, void *data);

/* Column accessors usable inside a SeafDBRowFunc callback. */
const char *
seaf_db_row_get_column_text (SeafDBRow *row, guint32 idx);
int
seaf_db_row_get_column_int (SeafDBRow *row, guint32 idx);
gint64
seaf_db_row_get_column_int64 (SeafDBRow *row, guint32 idx);

/* Single-value convenience queries. */
int
seaf_db_get_int (SeafDB *db, const char *sql);
gint64
seaf_db_get_int64 (SeafDB *db, const char *sql);
char *
seaf_db_get_string (SeafDB *db, const char *sql);

/* Transaction related */
SeafDBTrans *
seaf_db_begin_transaction (SeafDB *db);
void
seaf_db_trans_close (SeafDBTrans *trans);
int
seaf_db_commit (SeafDBTrans *trans);
int
seaf_db_rollback (SeafDBTrans *trans);
int
seaf_db_trans_query (SeafDBTrans *trans, const char *sql, int n, ...);
gboolean
seaf_db_trans_check_for_existence (SeafDBTrans *trans,
                                   const char *sql,
                                   gboolean *db_err,
                                   int n, ...);
int
seaf_db_trans_foreach_selected_row (SeafDBTrans *trans, const char *sql,
                                    SeafDBRowFunc callback, void *data,
                                    int n, ...);
int
seaf_db_row_get_column_count (SeafDBRow *row);

/* Prepared Statements.  The varargs are `n` (type, value) pairs where type
 * is one of "int", "int64" or "string". */
int
seaf_db_statement_query (SeafDB *db, const char *sql, int n, ...);
gboolean
seaf_db_statement_exists (SeafDB *db, const char *sql, gboolean *db_err, int n, ...);
int
seaf_db_statement_foreach_row (SeafDB *db, const char *sql,
                               SeafDBRowFunc callback, void *data,
                               int n, ...);
int
seaf_db_statement_get_int (SeafDB *db, const char *sql, int n, ...);
gint64
seaf_db_statement_get_int64 (SeafDB *db, const char *sql, int n, ...);
char *
seaf_db_statement_get_string (SeafDB *db, const char *sql, int n, ...);

#endif
================================================
FILE: common/seaf-utils.c
================================================
#include "common.h"
#include "log.h"
#include "seafile-session.h"
#include "seaf-utils.h"
#include "seaf-db.h"
#include "utils.h"
#include
#include
#include
#include
#define JWT_TOKEN_EXPIRE_TIME 3*24*3600 /* 3 days*/
/* Build "<tmp_file_dir>/<basename>" into the caller-supplied buffer `path`.
 * No bounds checking is done; the caller must provide a buffer large enough
 * for the joined path.  Returns `path` for convenience. */
char *
seafile_session_get_tmp_file_path (SeafileSession *session,
                                   const char *basename,
                                   char path[])
{
    size_t dir_len = strlen (session->tmp_file_dir);

    memcpy (path, session->tmp_file_dir, dir_len);
    path[dir_len] = '/';
    strcpy (path + dir_len + 1, basename);

    return path;
}
#define DEFAULT_MAX_CONNECTIONS 100
#define SQLITE_DB_NAME "seafile.db"
#define CCNET_DB "ccnet.db"
/* Open the seafile SQLite database under session->seaf_dir and store it in
 * session->db.  Returns 0 on success, -1 on error.
 */
static int
sqlite_db_start (SeafileSession *session)
{
    char *db_path;
    int max_connections = 0;
    int ret = 0;

    max_connections = g_key_file_get_integer (session->config,
                                              "database", "max_connections",
                                              NULL);
    if (max_connections <= 0)
        max_connections = DEFAULT_MAX_CONNECTIONS;

    db_path = g_build_filename (session->seaf_dir, SQLITE_DB_NAME, NULL);
    session->db = seaf_db_new_sqlite (db_path, max_connections);
    if (!session->db) {
        seaf_warning ("Failed to start sqlite db.\n");
        ret = -1;
    }
    /* seaf_db_new_sqlite copies the path; the old code leaked db_path. */
    g_free (db_path);

    return ret;
}
#ifdef HAVE_MYSQL
#define MYSQL_DEFAULT_PORT 3306
/* Parsed [database] configuration for the MySQL backend.  String fields are
 * heap-allocated and owned by the struct (freed by db_option_free). */
typedef struct DBOption {
    char *user;
    char *passwd;
    char *host;
    char *ca_path;          /* CA bundle path; required when verifying TLS */
    char *charset;          /* connection charset, may be NULL */
    char *ccnet_db_name;
    char *seafile_db_name;
    gboolean use_ssl;
    gboolean skip_verify;   /* skip server-certificate verification */
    int port;
    int max_connections;
} DBOption;
/* Free a DBOption and all of its owned strings.  NULL is a no-op. */
static void
db_option_free (DBOption *option)
{
    if (option == NULL)
        return;

    char *owned[] = {
        option->user, option->passwd, option->host,
        option->ca_path, option->charset,
        option->ccnet_db_name, option->seafile_db_name,
    };
    size_t i;

    for (i = 0; i < sizeof(owned) / sizeof(owned[0]); i++)
        g_free (owned[i]);
    g_free (option);
}
/* Replace *field with a copy of the environment variable `env_name` when it
 * is set and non-empty; otherwise leave *field untouched. */
static void
override_from_env (char **field, const char *env_name)
{
    const char *val = g_getenv (env_name);

    if (val && g_strcmp0 (val, "") != 0) {
        g_free (*field);
        *field = g_strdup (val);
    }
}

/* Override DB options from SEAFILE_MYSQL_DB_* environment variables.
 * The ccnet/seafile database names fall back to "ccnet_db"/"seafile_db"
 * when neither the config file nor the environment provides them.
 * Always returns 0. */
static int
load_db_option_from_env (DBOption *option)
{
    override_from_env (&option->user, "SEAFILE_MYSQL_DB_USER");
    override_from_env (&option->passwd, "SEAFILE_MYSQL_DB_PASSWORD");
    override_from_env (&option->host, "SEAFILE_MYSQL_DB_HOST");

    const char *env_port = g_getenv ("SEAFILE_MYSQL_DB_PORT");
    if (env_port && g_strcmp0 (env_port, "") != 0) {
        int port = atoi (env_port);
        if (port > 0)
            option->port = port;
    }

    override_from_env (&option->ccnet_db_name, "SEAFILE_MYSQL_DB_CCNET_DB_NAME");
    if (!option->ccnet_db_name) {
        option->ccnet_db_name = g_strdup ("ccnet_db");
        seaf_message ("Failed to read SEAFILE_MYSQL_DB_CCNET_DB_NAME, use ccnet_db by default\n");
    }

    override_from_env (&option->seafile_db_name, "SEAFILE_MYSQL_DB_SEAFILE_DB_NAME");
    if (!option->seafile_db_name) {
        option->seafile_db_name = g_strdup ("seafile_db");
        seaf_message ("Failed to read SEAFILE_MYSQL_DB_SEAFILE_DB_NAME, use seafile_db by default\n");
    }

    return 0;
}
/* Read the [database] section of the config file into a new DBOption,
 * then apply SEAFILE_MYSQL_DB_* environment overrides.  Returns NULL when a
 * required field (host, user, password, db names, or ca_path when TLS
 * verification is on) is missing.  Caller frees with db_option_free().
 */
static DBOption *
load_db_option (SeafileSession *session)
{
    GError *error = NULL;
    int ret = 0;
    DBOption *option = g_new0 (DBOption, 1);

    option->host = seaf_key_file_get_string (session->config, "database", "host", NULL);

    option->port = g_key_file_get_integer (session->config, "database", "port", &error);
    if (error) {
        /* Missing or malformed port falls back to the MySQL default. */
        g_clear_error (&error);
        option->port = MYSQL_DEFAULT_PORT;
    }

    option->user = seaf_key_file_get_string (session->config, "database", "user", NULL);
    option->passwd = seaf_key_file_get_string (session->config, "database", "password", NULL);
    option->seafile_db_name = seaf_key_file_get_string (session->config, "database", "db_name", NULL);

    option->use_ssl = g_key_file_get_boolean (session->config,
                                              "database", "use_ssl", NULL);
    option->skip_verify = g_key_file_get_boolean (session->config,
                                                  "database", "skip_verify", NULL);
    if (option->use_ssl && !option->skip_verify) {
        /* Server-certificate verification needs a CA bundle. */
        option->ca_path = seaf_key_file_get_string (session->config,
                                                    "database", "ca_path", NULL);
        if (!option->ca_path) {
            seaf_warning ("ca_path is required if use ssl and don't skip verify.\n");
            ret = -1;
            goto out;
        }
    }

    option->charset = seaf_key_file_get_string (session->config,
                                                "database", "connection_charset", NULL);

    option->max_connections = g_key_file_get_integer (session->config,
                                                      "database", "max_connections",
                                                      &error);
    if (error || option->max_connections < 0) {
        if (error)
            g_clear_error (&error);
        option->max_connections = DEFAULT_MAX_CONNECTIONS;
    }

    /* Environment variables take precedence over the config file. */
    load_db_option_from_env (option);

    if (!option->host) {
        seaf_warning ("DB host not set in config.\n");
        ret = -1;
        goto out;
    }
    if (!option->user) {
        seaf_warning ("DB user not set in config.\n");
        ret = -1;
        goto out;
    }
    if (!option->passwd) {
        seaf_warning ("DB passwd not set in config.\n");
        ret = -1;
        goto out;
    }
    if (!option->ccnet_db_name) {
        seaf_warning ("ccnet_db_name not set in config.\n");
        ret = -1;
        goto out;
    }
    if (!option->seafile_db_name) {
        seaf_warning ("db_name not set in config.\n");
        ret = -1;
        goto out;
    }

out:
    if (ret < 0) {
        db_option_free (option);
        return NULL;
    }
    return option;
}
/* Initialize session->db with a MySQL backend using the loaded [database]
 * configuration.  Returns 0 on success, -1 on error. */
static int
mysql_db_start (SeafileSession *session)
{
    DBOption *option = load_db_option (session);

    if (option == NULL) {
        seaf_warning ("Failed to load database config.\n");
        return -1;
    }

    session->db = seaf_db_new_mysql (option->host, option->port, option->user,
                                     option->passwd, option->seafile_db_name,
                                     NULL, option->use_ssl, option->skip_verify,
                                     option->ca_path, option->charset,
                                     option->max_connections);
    db_option_free (option);

    if (session->db == NULL) {
        seaf_warning ("Failed to start mysql db.\n");
        return -1;
    }

    return 0;
}
#endif
#ifdef HAVE_POSTGRESQL
/* Initialize session->db with a PostgreSQL backend from the [database]
 * config section.  Returns 0 on success, -1 on error.
 * Fixed: the original leaked previously-read config strings on every
 * early-return error path; now all strings are freed via goto cleanup.
 */
static int
pgsql_db_start (SeafileSession *session)
{
    char *host = NULL, *user = NULL, *passwd = NULL, *db = NULL, *unix_socket = NULL;
    unsigned int port;
    GError *error = NULL;
    int ret = -1;

    host = seaf_key_file_get_string (session->config, "database", "host", &error);
    if (!host) {
        seaf_warning ("DB host not set in config.\n");
        goto out;
    }

    user = seaf_key_file_get_string (session->config, "database", "user", &error);
    if (!user) {
        seaf_warning ("DB user not set in config.\n");
        goto out;
    }

    passwd = seaf_key_file_get_string (session->config, "database", "password", &error);
    if (!passwd) {
        seaf_warning ("DB passwd not set in config.\n");
        goto out;
    }

    db = seaf_key_file_get_string (session->config, "database", "db_name", &error);
    if (!db) {
        seaf_warning ("DB name not set in config.\n");
        goto out;
    }

    port = g_key_file_get_integer (session->config,
                                   "database", "port", &error);
    if (error) {
        port = 0;
        g_clear_error (&error);
    }

    unix_socket = seaf_key_file_get_string (session->config,
                                            "database", "unix_socket", &error);

    session->db = seaf_db_new_pgsql (host, port, user, passwd, db, unix_socket,
                                     DEFAULT_MAX_CONNECTIONS);
    if (!session->db) {
        seaf_warning ("Failed to start pgsql db.\n");
        goto out;
    }

    ret = 0;

out:
    g_free (host);
    g_free (user);
    g_free (passwd);
    g_free (db);
    g_free (unix_socket);
    return ret;
}
#endif
/* Read the [database] section of the seafile config and initialize
 * session->db.  "type = sqlite" selects SQLite; any other (or missing)
 * value selects MySQL when compiled with HAVE_MYSQL.  Also loads the
 * create_tables flag.  Returns 0 on success, -1 on error.
 */
int
load_database_config (SeafileSession *session)
{
    char *type;
    GError *error = NULL;
    int ret = 0;
    gboolean create_tables = FALSE;

    type = seaf_key_file_get_string (session->config, "database", "type", &error);
    /* Default to use mysql if not set. */
    if (type && strcasecmp (type, "sqlite") == 0) {
        ret = sqlite_db_start (session);
    }
#ifdef HAVE_MYSQL
    else {
        ret = mysql_db_start (session);
    }
#endif

    if (ret == 0) {
        /* create_tables defaults to FALSE when the key is absent. */
        if (g_key_file_has_key (session->config, "database", "create_tables", NULL))
            create_tables = g_key_file_get_boolean (session->config,
                                                    "database", "create_tables", NULL);
        session->create_tables = create_tables;
    }

    g_free (type);
    return ret;
}
/* Open the ccnet SQLite database under session->ccnet_dir and store it in
 * session->ccnet_db.  Returns 0 on success, -1 on error.
 */
static int
ccnet_init_sqlite_database (SeafileSession *session)
{
    char *db_path;
    int ret = 0;

    db_path = g_build_path ("/", session->ccnet_dir, CCNET_DB, NULL);
    session->ccnet_db = seaf_db_new_sqlite (db_path, DEFAULT_MAX_CONNECTIONS);
    if (!session->ccnet_db) {
        seaf_warning ("Failed to open ccnet database.\n");
        ret = -1;
    }
    /* seaf_db_new_sqlite copies the path; the old code leaked db_path. */
    g_free (db_path);

    return ret;
}
#ifdef HAVE_MYSQL
/* Initialize session->ccnet_db with a MySQL backend, reusing the shared
 * [database] configuration but pointing at the ccnet database name.
 * Returns 0 on success, -1 on error. */
static int
ccnet_init_mysql_database (SeafileSession *session)
{
    DBOption *option = load_db_option (session);

    if (option == NULL) {
        seaf_warning ("Failed to load database config.\n");
        return -1;
    }

    session->ccnet_db = seaf_db_new_mysql (option->host, option->port, option->user,
                                           option->passwd, option->ccnet_db_name,
                                           NULL, option->use_ssl, option->skip_verify,
                                           option->ca_path, option->charset,
                                           option->max_connections);
    db_option_free (option);

    if (session->ccnet_db == NULL) {
        seaf_warning ("Failed to open ccnet database.\n");
        return -1;
    }

    return 0;
}
#endif
/* Read the ccnet [database] config and initialize session->ccnet_db.
 * Returns 0 on success, -1 on error.
 */
int
load_ccnet_database_config (SeafileSession *session)
{
    /* Must be initialized: if built without HAVE_MYSQL and the configured
     * type is not "sqlite", neither branch below runs and the old code
     * returned an uninitialized value (undefined behavior). */
    int ret = 0;
    char *engine;
    gboolean create_tables = FALSE;

    engine = ccnet_key_file_get_string (session->config, "database", "type");
    if (engine && strcasecmp (engine, "sqlite") == 0) {
        seaf_message ("Use database sqlite\n");
        ret = ccnet_init_sqlite_database (session);
    }
#ifdef HAVE_MYSQL
    else {
        seaf_message("Use database Mysql\n");
        ret = ccnet_init_mysql_database (session);
    }
#endif

    if (ret == 0) {
        if (g_key_file_has_key (session->config, "database", "create_tables", NULL))
            create_tables = g_key_file_get_boolean (session->config, "database", "create_tables", NULL);
        session->ccnet_create_tables = create_tables;
    }

    g_free (engine);
    return ret;
}
#ifdef FULL_FEATURE
/* Generate an HS256-signed JWT for the notification server carrying the
 * repo id and user name, valid for JWT_TOKEN_EXPIRE_TIME seconds.
 * Returns a newly allocated token string (caller frees), or NULL on error.
 */
char *
seaf_gen_notif_server_jwt (const char *repo_id, const char *username)
{
    char *jwt_token = NULL;
    gint64 now = (gint64)time(NULL);
    jwt_t *jwt = NULL;

    if (!seaf->notif_server_private_key) {
        seaf_warning ("No private key is configured for generating jwt token\n");
        return NULL;
    }

    int ret = jwt_new (&jwt);
    if (ret != 0 || jwt == NULL) {
        seaf_warning ("Failed to create jwt\n");
        goto out;
    }

    ret = jwt_add_grant (jwt, "repo_id", repo_id);
    if (ret != 0) {
        seaf_warning ("Failed to add repo_id to jwt\n");
        goto out;
    }

    ret = jwt_add_grant (jwt, "username", username);
    if (ret != 0) {
        seaf_warning ("Failed to add username to jwt\n");
        goto out;
    }

    ret = jwt_add_grant_int (jwt, "exp", now + JWT_TOKEN_EXPIRE_TIME);
    if (ret != 0) {
        /* Fixed garbled log message (was "Failed to expire time to jwt"). */
        seaf_warning ("Failed to add expire time to jwt\n");
        goto out;
    }

    ret = jwt_set_alg (jwt, JWT_ALG_HS256, (unsigned char *)seaf->notif_server_private_key, strlen(seaf->notif_server_private_key));
    if (ret != 0) {
        seaf_warning ("Failed to set alg\n");
        goto out;
    }

    jwt_token = jwt_encode_str (jwt);

out:
    jwt_free (jwt);
    return jwt_token;
}
#endif
/* Extract the token part from a "<scheme> <token>" style auth header.
 * Returns a newly allocated copy of the token (caller frees with g_free),
 * or NULL when the header is missing or malformed. */
char *
seaf_parse_auth_token (const char *auth_token)
{
    char *token = NULL;
    char **parts;

    if (auth_token == NULL)
        return NULL;

    parts = g_strsplit (auth_token, " ", 2);
    if (parts == NULL)
        return NULL;

    if (g_strv_length (parts) >= 2)
        token = g_strdup (parts[1]);

    g_strfreev (parts);
    return token;
}
/* Split `filename` on its last '.' into a newly allocated *name and *ext.
 * When there is no dot, *name is a copy of the whole filename and *ext is
 * NULL.  Caller frees both with g_free(). */
void
split_filename (const char *filename, char **name, char **ext)
{
    const char *dot = strrchr (filename, '.');

    if (dot == NULL) {
        *name = g_strdup (filename);
        *ext = NULL;
        return;
    }

    *name = g_strndup (filename, dot - filename);
    *ext = g_strdup (dot + 1);
}
/* SeafDBRowFunc: prepend a copy of the token in column 0 of `row` onto the
 * GList pointed to by `data`.  Always returns TRUE to keep iterating. */
static gboolean
collect_token_list (SeafDBRow *row, void *data)
{
    GList **tokens = data;
    const char *value = seaf_db_row_get_column_text (row, 0);

    *tokens = g_list_prepend (*tokens, g_strdup (value));
    return TRUE;
}
/* Delete all sync tokens belonging to `repo` from RepoUserToken and
 * RepoTokenPeerInfo.  Returns 0 on success (including when the repo has no
 * tokens), -1 on database error.
 *
 * NOTE(review): the DELETE statements embed the token values directly in
 * the SQL rather than binding them; this assumes tokens never contain
 * quotes — presumably they are generated hex strings, but verify at the
 * token-generation site.
 */
int
seaf_delete_repo_tokens (SeafRepo *repo)
{
    int ret = 0;
    const char *template;
    GList *token_list = NULL;
    GList *ptr;
    GString *token_list_str = g_string_new ("");
    GString *sql = g_string_new ("");
    int rc;

    /* First collect all tokens of this repo. */
    template = "SELECT u.token FROM RepoUserToken as u WHERE u.repo_id=?";
    rc = seaf_db_statement_foreach_row (seaf->db, template,
                                        collect_token_list, &token_list,
                                        1, "string", repo->id);
    if (rc < 0) {
        goto out;
    }
    /* No tokens: nothing to delete. */
    if (rc == 0)
        goto out;

    /* Build a "'t1','t2',..." list for the IN clauses below. */
    for (ptr = token_list; ptr; ptr = ptr->next) {
        const char *token = (char *)ptr->data;
        if (token_list_str->len == 0)
            g_string_append_printf (token_list_str, "'%s'", token);
        else
            g_string_append_printf (token_list_str, ",'%s'", token);
    }

    /* Note that there is a size limit on sql query. In MySQL it's 1MB by default.
     * Normally the token_list won't be that long.
     */
    g_string_printf (sql, "DELETE FROM RepoUserToken WHERE token in (%s)",
                     token_list_str->str);
    rc = seaf_db_statement_query (seaf->db, sql->str, 0);
    if (rc < 0) {
        goto out;
    }

    g_string_printf (sql, "DELETE FROM RepoTokenPeerInfo WHERE token in (%s)",
                     token_list_str->str);
    rc = seaf_db_statement_query (seaf->db, sql->str, 0);
    if (rc < 0) {
        goto out;
    }

out:
    g_string_free (token_list_str, TRUE);
    g_string_free (sql, TRUE);
    g_list_free_full (token_list, (GDestroyNotify)g_free);

    if (rc < 0) {
        ret = -1;
    }
    return ret;
}
================================================
FILE: common/seaf-utils.h
================================================
#ifndef SEAF_UTILS_H
#define SEAF_UTILS_H

#include

struct _SeafileSession;

/* Build "<tmp_file_dir>/<basename>" into the caller-supplied buffer `path`
 * (no bounds checking; caller must size the buffer).  Returns `path`. */
char *
seafile_session_get_tmp_file_path (struct _SeafileSession *session,
                                   const char *basename,
                                   char path[]);

/* Load the [database] config and open session->db.  0 on success. */
int
load_database_config (struct _SeafileSession *session);

/* Load the ccnet database config and open session->ccnet_db.  0 on success. */
int
load_ccnet_database_config (struct _SeafileSession *session);

#ifdef FULL_FEATURE
#endif

/* Generate an HS256 JWT for the notification server; caller frees. */
char *
seaf_gen_notif_server_jwt (const char *repo_id, const char *username);

/* Extract the token from a "<scheme> <token>" auth header; caller frees. */
char *
seaf_parse_auth_token (const char *auth_token);

/* Split "name.ext" into newly allocated *name and *ext (*ext NULL if no dot). */
void
split_filename (const char *filename, char **name, char **ext);

/* Delete all sync tokens of `repo` from the database.  0 on success. */
int
seaf_delete_repo_tokens (SeafRepo *repo);

#endif
================================================
FILE: common/seafile-crypt.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#include
#include
#include "seafile-crypt.h"
#include "password-hash.h"
#include
#include "utils.h"
#include "log.h"
/*
The EVP_EncryptXXX and EVP_DecryptXXX series of functions have a
weird choice of returned value.
*/
/* OpenSSL's EVP_EncryptXXX/EVP_DecryptXXX return 1 on success and 0 on
 * failure; name the values to keep call sites readable. */
#define ENC_SUCCESS 1
#define ENC_FAILURE 0
#define DEC_SUCCESS 1
#define DEC_FAILURE 0
/* Parenthesized so the macro stays one expression wherever it is used:
 * without parentheses, KEYGEN_ITERATION * 2 would expand to
 * 1 << 19 * 2 == 1 << 38. */
#define KEYGEN_ITERATION (1 << 19)
#define KEYGEN_ITERATION2 1000
/* Fixed salt used by enc_version <= 2 repos. Version >= 3 repos carry a
 * per-repo random salt instead; this one is kept for compatibility. */
static unsigned char salt[8] = { 0xda, 0x90, 0x45, 0xc3, 0x06, 0xc7, 0xcc, 0x26 };
SeafileCrypt *
seafile_crypt_new (int version, unsigned char *key, unsigned char *iv)
{
    /* Allocate and fill a crypto descriptor.  Version 1 repos use a
     * 16-byte AES-128 key; every later version uses a 32-byte AES-256
     * key.  The IV is always 16 bytes.  Caller frees with g_free(). */
    SeafileCrypt *c = g_new0 (SeafileCrypt, 1);
    size_t key_len = (version == 1) ? 16 : 32;

    c->version = version;
    memcpy (c->key, key, key_len);
    memcpy (c->iv, iv, 16);

    return c;
}
int
seafile_derive_key (const char *data_in, int in_len, int version,
                    const char *repo_salt,
                    unsigned char *key, unsigned char *iv)
{
    /* Derive the AES key and IV from @data_in (a password, or the
     * decrypted per-repo random key).  The scheme depends on enc_version:
     *
     *  - version >= 3: PBKDF2-HMAC-SHA256 with the per-repo salt
     *    (@repo_salt, 64 hex chars decoded to 32 raw bytes);
     *  - version == 2: PBKDF2-HMAC-SHA256 with the fixed file-scope salt;
     *  - version == 1: EVP_BytesToKey over AES-128-CBC parameters;
     *  - otherwise:    EVP_BytesToKey over AES-128-ECB with 3 iterations
     *    (the oldest scheme, no salt).
     *
     * In the PBKDF2 variants the IV is itself derived from the key with a
     * second, short (10-iteration) PBKDF2 pass.
     *
     * Returns 0 on the PBKDF2 paths; on the EVP_BytesToKey paths the
     * OpenSSL return value (derived key size, 0 on failure) is passed
     * through unchanged.
     */
    if (version >= 3) {
        unsigned char repo_salt_bin[32];
        hex_to_rawdata (repo_salt, repo_salt_bin, 32);
        PKCS5_PBKDF2_HMAC (data_in, in_len,
                           repo_salt_bin, sizeof(repo_salt_bin),
                           KEYGEN_ITERATION2,
                           EVP_sha256(),
                           32, key);
        /* IV derived from the key, not from the password directly. */
        PKCS5_PBKDF2_HMAC ((char *)key, 32,
                           repo_salt_bin, sizeof(repo_salt_bin),
                           10,
                           EVP_sha256(),
                           16, iv);
        return 0;
    } else if (version == 2) {
        PKCS5_PBKDF2_HMAC (data_in, in_len,
                           salt, sizeof(salt),
                           KEYGEN_ITERATION2,
                           EVP_sha256(),
                           32, key);
        PKCS5_PBKDF2_HMAC ((char *)key, 32,
                           salt, sizeof(salt),
                           10,
                           EVP_sha256(),
                           16, iv);
        return 0;
    } else if (version == 1)
        return EVP_BytesToKey (EVP_aes_128_cbc(), /* cipher mode */
                               EVP_sha1(), /* message digest */
                               salt, /* salt */
                               (unsigned char*)data_in,
                               in_len,
                               KEYGEN_ITERATION, /* iteration times */
                               key, /* the derived key */
                               iv); /* IV, initial vector */
    else
        return EVP_BytesToKey (EVP_aes_128_ecb(), /* cipher mode */
                               EVP_sha1(), /* message digest */
                               NULL, /* salt */
                               (unsigned char*)data_in,
                               in_len,
                               3, /* iteration times */
                               key, /* the derived key */
                               iv); /* IV, initial vector */
}
int
seafile_generate_repo_salt (char *repo_salt)
{
    /* Fill @repo_salt (a buffer of at least 65 bytes) with 64 hex chars
     * of fresh random salt.  Returns 0 on success, -1 if the CSPRNG
     * fails. */
    unsigned char raw[32];

    if (RAND_bytes (raw, sizeof(raw)) != 1) {
        seaf_warning ("Failed to generate salt for repo encryption.\n");
        return -1;
    }

    rawdata_to_hex (raw, repo_salt, 32);
    return 0;
}
int
seafile_generate_random_key (const char *passwd,
                             int version,
                             const char *repo_salt,
                             char *random_key)
{
    /* Generate a fresh 32-byte repo secret key, encrypt it with a key
     * derived from @passwd, and write the result to @random_key as
     * 96 hex chars (48 encrypted bytes).  Returns 0 on success, -1 on
     * failure. */
    SeafileCrypt *crypt;
    unsigned char secret_key[32], *rand_key = NULL;
    int outlen;
    unsigned char key[32], iv[16];

    int rc = RAND_bytes (secret_key, sizeof(secret_key));
    if (rc != 1) {
        seaf_warning ("Failed to generate secret key for repo encryption.\n");
        return -1;
    }

    seafile_derive_key (passwd, strlen(passwd), version, repo_salt, key, iv);

    crypt = seafile_crypt_new (version, key, iv);

    /* Fix: seafile_encrypt() can fail, in which case it sets rand_key to
     * NULL; the old code passed that NULL to rawdata_to_hex(). */
    if (seafile_encrypt ((char **)&rand_key, &outlen,
                         (char *)secret_key, sizeof(secret_key), crypt) < 0) {
        seaf_warning ("Failed to encrypt secret key.\n");
        g_free (crypt);
        return -1;
    }

    rawdata_to_hex (rand_key, random_key, 48);

    g_free (crypt);
    g_free (rand_key);

    return 0;
}
void
seafile_generate_magic (int version, const char *repo_id,
                        const char *passwd,
                        const char *repo_salt,
                        char *magic)
{
    /* Compute the "magic" string: hex of a key derived from
     * repo_id||passwd.  It is stored with the repo and later used to
     * verify a user-supplied password before decrypting any data.
     * @magic must hold at least 65 bytes. */
    unsigned char key[32], iv[16];
    char *input = g_strconcat (repo_id, passwd, NULL);

    seafile_derive_key (input, strlen(input), version, repo_salt, key, iv);
    g_free (input);

    rawdata_to_hex (key, magic, 32);
}
void
seafile_generate_pwd_hash (int version,
                           const char *repo_id,
                           const char *passwd,
                           const char *repo_salt,
                           const char *algo,
                           const char *params_str,
                           char *pwd_hash)
{
    /* Compute the repo's "pwd_hash" from repo_id||passwd using the
     * configured password-hash algorithm (@algo/@params_str).  Like the
     * magic string, it is used to verify a user-supplied password before
     * decrypting data.  @pwd_hash must hold at least 65 bytes. */
    unsigned char key[32];
    char *input = g_strconcat (repo_id, passwd, NULL);

    if (version > 2) {
        pwd_hash_derive_key (input, strlen(input), repo_salt, algo, params_str, key);
    } else {
        /* Repos older than version 3 have no per-repo salt; fall back to
         * the fixed built-in one. */
        char fixed_salt[64] = {0};
        rawdata_to_hex (salt, fixed_salt, 8);
        pwd_hash_derive_key (input, strlen(input), fixed_salt, algo, params_str, key);
    }
    g_free (input);

    rawdata_to_hex (key, pwd_hash, 32);
}
int
seafile_verify_repo_passwd (const char *repo_id,
                            const char *passwd,
                            const char *magic,
                            int version,
                            const char *repo_salt)
{
    /* Recompute the repo "magic" from repo_id||passwd and compare it with
     * the stored @magic.  Returns 0 if the password is correct, -1 on a
     * wrong password or unsupported version. */
    GString *buf;
    unsigned char key[32], iv[16];
    char hex[65];

    /* Fix: validate the version before allocating, so the
     * unsupported-version path no longer leaks the GString. */
    if (version < 1 || version > 4) {
        seaf_warning ("Unsupported enc_version %d.\n", version);
        return -1;
    }

    buf = g_string_new (NULL);
    g_string_append_printf (buf, "%s%s", repo_id, passwd);
    seafile_derive_key (buf->str, buf->len, version, repo_salt, key, iv);
    g_string_free (buf, TRUE);

    /* Version 1 derives a 16-byte key; later versions use 32 bytes. */
    if (version >= 2)
        rawdata_to_hex (key, hex, 32);
    else
        rawdata_to_hex (key, hex, 16);

    return g_strcmp0 (hex, magic) == 0 ? 0 : -1;
}
int
seafile_pwd_hash_verify_repo_passwd (int version,
                                     const char *repo_id,
                                     const char *passwd,
                                     const char *repo_salt,
                                     const char *pwd_hash,
                                     const char *algo,
                                     const char *params_str)
{
    /* Recompute the password hash from repo_id||passwd with the stored
     * algorithm parameters and compare against @pwd_hash.  Returns 0 on
     * match, -1 otherwise. */
    unsigned char key[32];
    char hex[65];
    char *input = g_strconcat (repo_id, passwd, NULL);

    if (version > 2) {
        pwd_hash_derive_key (input, strlen(input), repo_salt, algo, params_str, key);
    } else {
        /* Repos older than version 3 have no per-repo salt; use the
         * fixed built-in one. */
        char fixed_salt[64] = {0};
        rawdata_to_hex (salt, fixed_salt, 8);
        pwd_hash_derive_key (input, strlen(input), fixed_salt, algo, params_str, key);
    }
    g_free (input);

    rawdata_to_hex (key, hex, 32);

    return g_strcmp0 (hex, pwd_hash) == 0 ? 0 : -1;
}
int
seafile_decrypt_repo_enc_key (int enc_version,
                              const char *passwd, const char *random_key,
                              const char *repo_salt,
                              unsigned char *key_out, unsigned char *iv_out)
{
    /* Recover the repo's real encryption key/IV from the user password.
     *
     * Version 1: the key/IV are derived directly from the password.
     * Version >= 2: the password-derived key decrypts @random_key
     * (96 hex chars = 48 raw bytes), and the real key/IV are then
     * derived from the decrypted 32-byte secret.
     *
     * @key_out must hold 16 bytes for version 1, 32 bytes otherwise;
     * @iv_out always holds 16 bytes.  Returns 0 on success, -1 on
     * failure or unknown version. */
    unsigned char key[32], iv[16];

    seafile_derive_key (passwd, strlen(passwd), enc_version, repo_salt, key, iv);

    if (enc_version == 1) {
        memcpy (key_out, key, 16);
        memcpy (iv_out, iv, 16);
        return 0;
    } else if (enc_version >= 2) {
        unsigned char enc_random_key[48], *dec_random_key;
        int outlen;
        SeafileCrypt *crypt;

        if (random_key == NULL || random_key[0] == 0) {
            seaf_warning ("Empty random key.\n");
            return -1;
        }

        hex_to_rawdata (random_key, enc_random_key, 48);

        crypt = seafile_crypt_new (enc_version, key, iv);
        if (seafile_decrypt ((char **)&dec_random_key, &outlen,
                             (char *)enc_random_key, 48,
                             crypt) < 0) {
            seaf_warning ("Failed to decrypt random key.\n");
            g_free (crypt);
            return -1;
        }
        g_free (crypt);

        /* Re-derive: the final key/IV come from the decrypted secret,
         * not from the password itself. */
        seafile_derive_key ((char *)dec_random_key, 32, enc_version,
                            repo_salt,
                            key, iv);
        memcpy (key_out, key, 32);
        memcpy (iv_out, iv, 16);

        g_free (dec_random_key);
        return 0;
    }

    return -1;
}
int
seafile_update_random_key (const char *old_passwd, const char *old_random_key,
                           const char *new_passwd, char *new_random_key,
                           int enc_version, const char *repo_salt)
{
    /* Re-encrypt the repo's 32-byte secret key under a new password.
     * @old_random_key is the current 96-hex-char encrypted secret;
     * the re-encrypted secret is written to @new_random_key (>= 97
     * bytes).  Returns 0 on success, -1 on failure. */
    unsigned char key[32], iv[16];
    unsigned char random_key_raw[48], *secret_key, *new_random_key_raw;
    int secret_key_len, random_key_len;
    SeafileCrypt *crypt;

    /* First, use old_passwd to decrypt secret key from old_random_key. */
    seafile_derive_key (old_passwd, strlen(old_passwd), enc_version,
                        repo_salt, key, iv);

    hex_to_rawdata (old_random_key, random_key_raw, 48);

    crypt = seafile_crypt_new (enc_version, key, iv);
    if (seafile_decrypt ((char **)&secret_key, &secret_key_len,
                         (char *)random_key_raw, 48,
                         crypt) < 0) {
        seaf_warning ("Failed to decrypt random key.\n");
        g_free (crypt);
        return -1;
    }
    g_free (crypt);

    /* Second, use new_passwd to encrypt secret key. */
    seafile_derive_key (new_passwd, strlen(new_passwd), enc_version,
                        repo_salt, key, iv);
    crypt = seafile_crypt_new (enc_version, key, iv);

    /* Fix: the old code ignored seafile_encrypt()'s return value; on
     * failure it would pass NULL to rawdata_to_hex() and leak
     * secret_key. */
    if (seafile_encrypt ((char **)&new_random_key_raw, &random_key_len,
                         (char *)secret_key, secret_key_len, crypt) < 0) {
        seaf_warning ("Failed to encrypt secret key.\n");
        g_free (secret_key);
        g_free (crypt);
        return -1;
    }

    rawdata_to_hex (new_random_key_raw, new_random_key, 48);

    g_free (secret_key);
    g_free (new_random_key_raw);
    g_free (crypt);

    return 0;
}
int
seafile_encrypt (char **data_out,
                 int *out_len,
                 const char *data_in,
                 const int in_len,
                 SeafileCrypt *crypt)
{
    /* Encrypt @in_len bytes of @data_in with the key/IV/cipher described
     * by @crypt (version 1: AES-128-CBC, version 3: AES-128-ECB,
     * otherwise AES-256-CBC).  On success returns 0; *data_out holds the
     * ciphertext (caller frees with g_free) and *out_len its length.
     * On failure returns -1 with *data_out == NULL and *out_len == -1. */
    *data_out = NULL;
    *out_len = -1;

    /* check validation */
    if ( data_in == NULL || in_len <= 0 || crypt == NULL) {
        seaf_warning ("Invalid params.\n");
        return -1;
    }

    EVP_CIPHER_CTX *ctx;
    int ret;
    int blks;

    /* Prepare CTX for encryption. */
    ctx = EVP_CIPHER_CTX_new ();

    if (crypt->version == 1)
        ret = EVP_EncryptInit_ex (ctx,
                                  EVP_aes_128_cbc(), /* cipher mode */
                                  NULL, /* engine, NULL for default */
                                  crypt->key, /* derived key */
                                  crypt->iv); /* initial vector */
    else if (crypt->version == 3)
        ret = EVP_EncryptInit_ex (ctx,
                                  EVP_aes_128_ecb(), /* cipher mode */
                                  NULL, /* engine, NULL for default */
                                  crypt->key, /* derived key */
                                  crypt->iv); /* initial vector */
    else
        ret = EVP_EncryptInit_ex (ctx,
                                  EVP_aes_256_cbc(), /* cipher mode */
                                  NULL, /* engine, NULL for default */
                                  crypt->key, /* derived key */
                                  crypt->iv); /* initial vector */

    if (ret == ENC_FAILURE) {
        EVP_CIPHER_CTX_free (ctx);
        return -1;
    }

    /* Allocating output buffer. */
    /*
      For EVP symmetric encryption, padding is always used __even if__
      data size is a multiple of block size, in which case the padding
      length is the block size. so we have the following:
    */
    blks = (in_len / BLK_SIZE) + 1;

    *data_out = (char *)g_malloc (blks * BLK_SIZE);
    if (*data_out == NULL) {
        seaf_warning ("failed to allocate the output buffer.\n");
        goto enc_error;
    }

    int update_len, final_len;

    /* Do the encryption. */
    ret = EVP_EncryptUpdate (ctx,
                             (unsigned char*)*data_out,
                             &update_len,
                             (unsigned char*)data_in,
                             in_len);
    if (ret == ENC_FAILURE)
        goto enc_error;

    /* Finish the possible partial block. */
    ret = EVP_EncryptFinal_ex (ctx,
                               (unsigned char*)*data_out + update_len,
                               &final_len);
    *out_len = update_len + final_len;

    /* out_len should be equal to the allocated buffer size. */
    if (ret == ENC_FAILURE || *out_len != (blks * BLK_SIZE))
        goto enc_error;

    EVP_CIPHER_CTX_free (ctx);
    return 0;

enc_error:
    /* Single cleanup path: free the context and the partially written
     * buffer, and restore the documented failure outputs. */
    EVP_CIPHER_CTX_free (ctx);

    *out_len = -1;
    if (*data_out != NULL)
        g_free (*data_out);
    *data_out = NULL;

    return -1;
}
int
seafile_decrypt (char **data_out,
                 int *out_len,
                 const char *data_in,
                 const int in_len,
                 SeafileCrypt *crypt)
{
    /* Decrypt @in_len bytes of @data_in with the key/IV/cipher described
     * by @crypt (same cipher selection as seafile_encrypt).  On success
     * returns 0; *data_out holds the plaintext (caller frees with
     * g_free) and *out_len its length.  On failure returns -1 with
     * *data_out == NULL and *out_len == -1. */
    *data_out = NULL;
    *out_len = -1;

    /* Check validation. Because padding is always used, in_len must
     * be a multiple of BLK_SIZE */
    if ( data_in == NULL || in_len <= 0 || in_len % BLK_SIZE != 0 ||
         crypt == NULL) {
        seaf_warning ("Invalid param(s).\n");
        return -1;
    }

    EVP_CIPHER_CTX *ctx;
    int ret;

    /* Prepare CTX for decryption. */
    ctx = EVP_CIPHER_CTX_new ();

    if (crypt->version == 1)
        ret = EVP_DecryptInit_ex (ctx,
                                  EVP_aes_128_cbc(), /* cipher mode */
                                  NULL, /* engine, NULL for default */
                                  crypt->key, /* derived key */
                                  crypt->iv); /* initial vector */
    else if (crypt->version == 3)
        ret = EVP_DecryptInit_ex (ctx,
                                  EVP_aes_128_ecb(), /* cipher mode */
                                  NULL, /* engine, NULL for default */
                                  crypt->key, /* derived key */
                                  crypt->iv); /* initial vector */
    else
        ret = EVP_DecryptInit_ex (ctx,
                                  EVP_aes_256_cbc(), /* cipher mode */
                                  NULL, /* engine, NULL for default */
                                  crypt->key, /* derived key */
                                  crypt->iv); /* initial vector */

    if (ret == DEC_FAILURE) {
        EVP_CIPHER_CTX_free (ctx);
        return -1;
    }

    /* Allocating output buffer. */
    /* Plaintext is never longer than ciphertext (padding is stripped). */
    *data_out = (char *)g_malloc (in_len);
    if (*data_out == NULL) {
        seaf_warning ("failed to allocate the output buffer.\n");
        goto dec_error;
    }

    int update_len, final_len;

    /* Do the decryption. */
    ret = EVP_DecryptUpdate (ctx,
                             (unsigned char*)*data_out,
                             &update_len,
                             (unsigned char*)data_in,
                             in_len);
    if (ret == DEC_FAILURE)
        goto dec_error;

    /* Finish the possible partial block.  This is also where a wrong key
     * typically fails (bad padding). */
    ret = EVP_DecryptFinal_ex (ctx,
                               (unsigned char*)*data_out + update_len,
                               &final_len);
    *out_len = update_len + final_len;

    /* out_len should be smaller than in_len. */
    if (ret == DEC_FAILURE || *out_len > in_len)
        goto dec_error;

    EVP_CIPHER_CTX_free (ctx);
    return 0;

dec_error:
    /* Single cleanup path: free the context and the partially written
     * buffer, and restore the documented failure outputs. */
    EVP_CIPHER_CTX_free (ctx);

    *out_len = -1;
    if (*data_out != NULL)
        g_free (*data_out);
    *data_out = NULL;

    return -1;
}
int
seafile_decrypt_init (EVP_CIPHER_CTX **ctx,
                      int version,
                      const unsigned char *key,
                      const unsigned char *iv)
{
    /* Allocate *ctx and initialize it for decryption with the cipher
     * matching @version (same selection as seafile_decrypt).  On success
     * returns 0 and the caller owns *ctx (free with
     * EVP_CIPHER_CTX_free).  On failure returns -1 and *ctx is NULL. */
    int ret;
    const EVP_CIPHER *cipher;

    if (version == 1)
        cipher = EVP_aes_128_cbc ();
    else if (version == 3)
        cipher = EVP_aes_128_ecb ();
    else
        cipher = EVP_aes_256_cbc ();

    /* Prepare CTX for decryption. */
    *ctx = EVP_CIPHER_CTX_new ();
    ret = EVP_DecryptInit_ex (*ctx, cipher,
                              NULL, /* engine, NULL for default */
                              key, iv);
    if (ret == DEC_FAILURE) {
        /* Fix: the context was previously leaked on this path; free it
         * and null the out-parameter (EVP_CIPHER_CTX_free(NULL) is safe
         * for callers that free unconditionally). */
        EVP_CIPHER_CTX_free (*ctx);
        *ctx = NULL;
        return -1;
    }

    return 0;
}
================================================
FILE: common/seafile-crypt.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/*
  Description:

  The function pair "seafile_encrypt/seafile_decrypt" encrypt/decrypt
  data in the seafile system using OpenSSL's EVP interface.  The cipher
  depends on the repo's encryption version: AES-128-CBC for version 1,
  AES-256-CBC for later versions (one legacy crypt version uses
  AES-128-ECB).
*/
#ifndef _SEAFILE_CRYPT_H
#define _SEAFILE_CRYPT_H
#include
#include
/* Block size, in bytes. For AES it can only be 16 bytes. */
#define BLK_SIZE 16
#define ENCRYPT_BLK_SIZE BLK_SIZE
struct SeafileCrypt {
int version;
unsigned char key[32]; /* set when enc_version >= 1 */
unsigned char iv[16];
};
typedef struct SeafileCrypt SeafileCrypt;
SeafileCrypt *
seafile_crypt_new (int version, unsigned char *key, unsigned char *iv);
/*
  seafile_derive_key: derive the AES key and IV from @data_in.
  The key is 16 bytes for version 1 and 32 bytes for version >= 2;
  the IV is always 16 bytes.

  seafile_encrypt/seafile_decrypt contract:

  @data_out: pointer to the output of the encrypted/decrypted data,
  whose content must be freed by g_free when not used.
  @out_len: pointer to length of output, in bytes
  @data_in: address of input buffer
  @in_len: length of data to be encrypted/decrypted, in bytes
  @crypt: container of crypto info.

  RETURN VALUES:

  On success, 0 is returned, and the encrypted/decrypted data is in
  *data_out, with out_len set to its length. On failure, -1 is returned
  and *data_out is set to NULL, with out_len set to -1.
*/
int
seafile_derive_key (const char *data_in, int in_len, int version,
const char *repo_salt,
unsigned char *key, unsigned char *iv);
/* @salt must be an char array of size 65 bytes. */
int
seafile_generate_repo_salt (char *repo_salt);
/*
* Generate the real key used to encrypt data.
* The key 32 bytes long and encrpted with @passwd.
*/
int
seafile_generate_random_key (const char *passwd,
int version,
const char *repo_salt,
char *random_key);
void
seafile_generate_magic (int version, const char *repo_id,
const char *passwd,
const char *repo_salt,
char *magic);
void
seafile_generate_pwd_hash (int version,
const char *repo_id,
const char *passwd,
const char *repo_salt,
const char *algo,
const char *params_str,
char *pwd_hash);
int
seafile_verify_repo_passwd (const char *repo_id,
const char *passwd,
const char *magic,
int version,
const char *repo_salt);
int
seafile_pwd_hash_verify_repo_passwd (int version,
const char *repo_id,
const char *passwd,
const char *repo_salt,
const char *pwd_hash,
const char *algo,
const char *params_str);
int
seafile_decrypt_repo_enc_key (int enc_version,
const char *passwd, const char *random_key,
const char *repo_salt,
unsigned char *key_out, unsigned char *iv_out);
int
seafile_update_random_key (const char *old_passwd, const char *old_random_key,
const char *new_passwd, char *new_random_key,
int enc_version, const char *repo_salt);
int
seafile_encrypt (char **data_out,
int *out_len,
const char *data_in,
const int in_len,
SeafileCrypt *crypt);
int
seafile_decrypt (char **data_out,
int *out_len,
const char *data_in,
const int in_len,
SeafileCrypt *crypt);
int
seafile_decrypt_init (EVP_CIPHER_CTX **ctx,
int version,
const unsigned char *key,
const unsigned char *iv);
#endif /* _SEAFILE_CRYPT_H */
================================================
FILE: common/sync-repo-common.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef SYNC_REPO_COMMON
#define SYNC_REPO_COMMON
#define SC_COMMIT_ID "300"
#define SS_COMMIT_ID "Commit ID"
#define SC_NO_REPO "301"
#define SS_NO_REPO "No such repo"
#define SC_NO_BRANCH "302"
#define SS_NO_BRANCH "No such branch"
#define SC_NO_DSYNC "303"
#define SS_NO_DSYNC "Not double sync"
#define SC_REPO_CORRUPT "304"
#define SS_REPO_CORRUPT "Repo corrupted"
#define SC_SERVER_ERROR "401"
#define SS_SERVER_ERROR "Internal server error"
#endif
================================================
FILE: common/user-mgr.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#include "common.h"
#include
#include
#include "utils.h"
#include "seafile-session.h"
#include "seafile-error.h"
#include "user-mgr.h"
#include "seaf-db.h"
#include "seaf-utils.h"
#include
#include
#include
#define DEBUG_FLAG CCNET_DEBUG_PEER
#include "log.h"
#define DEFAULT_SAVING_INTERVAL_MSEC 30000
#define DEFAULT_MAX_CONNECTIONS 100
G_DEFINE_TYPE (CcnetUserManager, ccnet_user_manager, G_TYPE_OBJECT);
#define GET_PRIV(o) \
(G_TYPE_INSTANCE_GET_PRIVATE ((o), CCNET_TYPE_USER_MANAGER, CcnetUserManagerPriv))
static int open_db (CcnetUserManager *manager);
struct CcnetUserManagerPriv {
CcnetDB *db;
int max_users;
};
static void
ccnet_user_manager_class_init (CcnetUserManagerClass *klass)
{
    /* GObject class setup: register per-instance private data
     * (CcnetUserManagerPriv, accessed via GET_PRIV). */
    g_type_class_add_private (klass, sizeof (CcnetUserManagerPriv));
}
static void
ccnet_user_manager_init (CcnetUserManager *manager)
{
    /* GObject instance setup: wire up the private-data pointer. */
    manager->priv = GET_PRIV(manager);
}
CcnetUserManager*
ccnet_user_manager_new (SeafileSession *session)
{
    /* Create a user manager bound to @session.  The user_hash table is
     * allocated here; the DB connection is established later in
     * ccnet_user_manager_prepare(). */
    CcnetUserManager *mgr = g_object_new (CCNET_TYPE_USER_MANAGER, NULL);

    mgr->session = session;
    mgr->user_hash = g_hash_table_new (g_str_hash, g_str_equal);

    return mgr;
}
#define DEFAULT_PASSWD_HASH_ITER 10000
// return current active user number
static int
get_current_user_number (CcnetUserManager *manager)
{
    /* Return the number of users recorded in the "DB" source, or -1 on
     * query failure. */
    int count = ccnet_user_manager_count_emailusers (manager, "DB");

    if (count < 0) {
        ccnet_warning ("Failed to get user number from DB.\n");
        return -1;
    }

    return count;
}
static gboolean
check_user_number (CcnetUserManager *manager, gboolean allow_equal)
{
    /* Enforce the configured user limit.  max_users == 0 means "no
     * limit".  With @allow_equal, being exactly at the limit is still
     * accepted; otherwise the current count must stay strictly below
     * the limit (used before adding a new user). */
    int max = manager->priv->max_users;

    if (max == 0)
        return TRUE;

    int cur = get_current_user_number (manager);
    if (cur < 0)
        return FALSE;

    gboolean over = allow_equal ? (cur > max) : (cur >= max);
    if (over) {
        ccnet_warning ("The number of users exceeds limit, max %d, current %d\n",
                       max, cur);
        return FALSE;
    }

    return TRUE;
}
int
ccnet_user_manager_prepare (CcnetUserManager *manager)
{
    /* One-time setup: set the password-hash iteration count, open the
     * user DB and verify the configured user limit is not already
     * exceeded.  Returns 0 on success, negative on failure. */
    int ret;

    manager->passwd_hash_iter = DEFAULT_PASSWD_HASH_ITER;

    /* NOTE(review): userdb_path appears unused by the sqlite backend
     * below (open_sqlite_db opens PeerMgr/usermgr.db) — confirm before
     * removing. */
    manager->userdb_path = g_build_filename (manager->session->ccnet_dir,
                                             "user-db", NULL);
    ret = open_db(manager);
    if (ret < 0)
        return ret;

    /* allow_equal=TRUE: an install already exactly at the limit may
     * still start up. */
    if (!check_user_number (manager, TRUE)) {
        return -1;
    }

    return 0;
}
void
ccnet_user_manager_free (CcnetUserManager *manager)
{
    /* Drop the manager's GObject reference; remaining cleanup is left to
     * the GObject machinery (no explicit finalizer is visible here). */
    g_object_unref (manager);
}
void
ccnet_user_manager_start (CcnetUserManager *manager)
{
    /* Intentionally empty: no startup work needed; kept for interface
     * symmetry with other managers. */
}
/* Intentionally empty shutdown hook; kept for interface symmetry. */
void ccnet_user_manager_on_exit (CcnetUserManager *manager)
{
}
void
ccnet_user_manager_set_max_users (CcnetUserManager *manager, gint64 max_users)
{
    /* 0 means unlimited (see check_user_number).
     * NOTE(review): priv->max_users is declared int — gint64 values
     * beyond INT_MAX would truncate; confirm callers' range. */
    manager->priv->max_users = max_users;
}
/* -------- DB Operations -------- */
/* Create the user-management tables (EmailUser, Binding, UserRole,
 * LDAPConfig) if they do not exist, with DDL matching the configured
 * database backend.  Returns 0 on success, -1 on the first failed
 * statement. */
static int check_db_table (SeafDB *db)
{
    char *sql;

    int db_type = seaf_db_type (db);
    if (db_type == SEAF_DB_TYPE_MYSQL) {
        sql = "CREATE TABLE IF NOT EXISTS EmailUser ("
            "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
            "email VARCHAR(255), passwd VARCHAR(256), "
            "is_staff BOOL NOT NULL, is_active BOOL NOT NULL, "
            "ctime BIGINT, reference_id VARCHAR(255),"
            "UNIQUE INDEX (email), UNIQUE INDEX (reference_id))"
            "ENGINE=INNODB";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE TABLE IF NOT EXISTS Binding (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
            "email VARCHAR(255), peer_id CHAR(41),"
            "UNIQUE INDEX (peer_id), INDEX (email(20)))"
            "ENGINE=INNODB";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE TABLE IF NOT EXISTS UserRole ("
            "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
            "email VARCHAR(255), role VARCHAR(255), UNIQUE INDEX (email)) "
            "ENGINE=INNODB";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE TABLE IF NOT EXISTS LDAPConfig ( "
            "id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, cfg_group VARCHAR(255) NOT NULL,"
            "cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER) ENGINE=INNODB";
        if (seaf_db_query (db, sql) < 0)
            return -1;

    } else if (db_type == SEAF_DB_TYPE_SQLITE) {
        /* SQLite cannot declare indexes inline; create them separately. */
        sql = "CREATE TABLE IF NOT EXISTS EmailUser ("
            "id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,"
            "email TEXT, passwd TEXT, is_staff bool NOT NULL, "
            "is_active bool NOT NULL, ctime INTEGER, "
            "reference_id TEXT)";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE UNIQUE INDEX IF NOT EXISTS email_index on EmailUser (email)";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE UNIQUE INDEX IF NOT EXISTS reference_id_index on EmailUser (reference_id)";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE TABLE IF NOT EXISTS Binding (email TEXT, peer_id TEXT)";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE INDEX IF NOT EXISTS email_index on Binding (email)";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE UNIQUE INDEX IF NOT EXISTS peer_index on Binding (peer_id)";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE TABLE IF NOT EXISTS UserRole (email TEXT, role TEXT)";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE INDEX IF NOT EXISTS userrole_email_index on UserRole (email)";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE UNIQUE INDEX IF NOT EXISTS userrole_userrole_index on UserRole (email, role)";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE TABLE IF NOT EXISTS LDAPConfig (cfg_group VARCHAR(255) NOT NULL,"
            "cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER)";
        if (seaf_db_query (db, sql) < 0)
            return -1;

    } else if (db_type == SEAF_DB_TYPE_PGSQL) {
        sql = "CREATE TABLE IF NOT EXISTS EmailUser ("
            "id SERIAL PRIMARY KEY, "
            "email VARCHAR(255), passwd VARCHAR(256), "
            "is_staff INTEGER NOT NULL, is_active INTEGER NOT NULL, "
            "ctime BIGINT, reference_id VARCHAR(255), UNIQUE (email))";
        if (seaf_db_query (db, sql) < 0)
            return -1;
        /* Index creation below is disabled; presumably it pre-dates
         * IF NOT EXISTS support for PgSQL indexes — TODO confirm. */
        //if (!pgsql_index_exists (db, "emailuser_reference_id_idx")) {
        //    sql = "CREATE UNIQUE INDEX emailuser_reference_id_idx ON EmailUser (reference_id)";
        //    if (seaf_db_query (db, sql) < 0)
        //        return -1;
        //}

        sql = "CREATE TABLE IF NOT EXISTS Binding (email VARCHAR(255), peer_id CHAR(41),"
            "UNIQUE (peer_id))";
        if (seaf_db_query (db, sql) < 0)
            return -1;

        sql = "CREATE TABLE IF NOT EXISTS UserRole (email VARCHAR(255), "
            " role VARCHAR(255), UNIQUE (email, role))";
        if (seaf_db_query (db, sql) < 0)
            return -1;
        //if (!pgsql_index_exists (db, "userrole_email_idx")) {
        //    sql = "CREATE INDEX userrole_email_idx ON UserRole (email)";
        //    if (seaf_db_query (db, sql) < 0)
        //        return -1;
        //}

        sql = "CREATE TABLE IF NOT EXISTS LDAPConfig (cfg_group VARCHAR(255) NOT NULL,"
            "cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER)";
        if (seaf_db_query (db, sql) < 0)
            return -1;
    }

    return 0;
}
static CcnetDB *
open_sqlite_db (CcnetUserManager *manager)
{
    /* Open (creating if needed) the sqlite user DB at
     * <ccnet_dir>/PeerMgr/usermgr.db.  Returns NULL on failure. */
    CcnetDB *db = NULL;
    char *db_dir;
    char *db_path;

    db_dir = g_build_filename (manager->session->ccnet_dir, "PeerMgr", NULL);
    if (checkdir_with_mkdir(db_dir) < 0) {
        ccnet_error ("Cannot open db dir %s: %s\n", db_dir,
                     strerror(errno));
        g_free (db_dir);    /* fix: was leaked on this error path */
        return NULL;
    }
    g_free (db_dir);

    db_path = g_build_filename (manager->session->ccnet_dir, "PeerMgr",
                                "usermgr.db", NULL);
    db = seaf_db_new_sqlite (db_path, DEFAULT_MAX_CONNECTIONS);
    g_free (db_path);

    return db;
}
static int
open_db (CcnetUserManager *manager)
{
    /* Select the user DB backend from the configured ccnet DB type and
     * optionally create the tables.  Returns 0 on success, -1 on
     * failure. */
    CcnetDB *db = NULL;

    switch (seaf_db_type(manager->session->ccnet_db)) {
    /* To be compatible with the db file layout of 0.9.1 version,
     * we don't use conf-dir/ccnet.db for user and peer info, but
     * use conf-dir/PeerMgr/peermgr.db and conf-dir/PeerMgr/usermgr.db instead.
     */
    case SEAF_DB_TYPE_SQLITE:
        db = open_sqlite_db (manager);
        break;
    case SEAF_DB_TYPE_PGSQL:
    case SEAF_DB_TYPE_MYSQL:
        /* MySQL/PgSQL: user tables live in the shared ccnet database. */
        db = manager->session->ccnet_db;
        break;
    }

    if (!db)
        return -1;
    manager->priv->db = db;

    /* PgSQL always runs table creation; other backends only when the
     * session is configured to create tables. */
    if ((manager->session->ccnet_create_tables || seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)
        && check_db_table (db) < 0) {
        ccnet_warning ("Failed to create user db tables.\n");
        return -1;
    }

    return 0;
}
/* -------- EmailUser Management -------- */
/* This fixed salt is used in very early versions. It's kept for compatibility.
* For the current password hashing algorithm, please see hash_password_pbkdf2_sha256()
*/
static unsigned char salt[8] = { 0xdb, 0x91, 0x45, 0xc3, 0x06, 0xc7, 0xcc, 0x26 };
static void
hash_password (const char *passwd, char *hashed_passwd)
{
    /* Oldest legacy scheme: plain (unsalted) SHA-1 of the password, hex
     * encoded into @hashed_passwd (>= 41 bytes).  Kept only so old DB
     * entries can still be validated and then upgraded. */
    unsigned char digest[20];
    SHA_CTX ctx;

    SHA1_Init (&ctx);
    SHA1_Update (&ctx, passwd, strlen(passwd));
    SHA1_Final (digest, &ctx);

    rawdata_to_hex (digest, hashed_passwd, 20);
}
static void
hash_password_salted (const char *passwd, char *hashed_passwd)
{
    /* Legacy scheme #2: SHA-256 over passwd || fixed file-scope salt,
     * hex encoded into @hashed_passwd (>= 65 bytes).  Kept only so
     * existing DB entries can be validated and then upgraded. */
    unsigned char digest[SHA256_DIGEST_LENGTH];
    SHA256_CTX ctx;

    SHA256_Init (&ctx);
    SHA256_Update (&ctx, passwd, strlen(passwd));
    SHA256_Update (&ctx, salt, sizeof(salt));
    SHA256_Final (digest, &ctx);

    rawdata_to_hex (digest, hashed_passwd, SHA256_DIGEST_LENGTH);
}
static void
hash_password_pbkdf2_sha256 (const char *passwd,
                             int iterations,
                             char **db_passwd)
{
    /* Current password hashing scheme: PBKDF2-HMAC-SHA256 with a fresh
     * random 32-byte salt.  The result is encoded Django-style as
     * "PBKDF2SHA256$<iterations>$<salt-hex>$<hash-hex>" and returned in
     * *db_passwd (caller frees with g_free). */
    guint8 sha[SHA256_DIGEST_LENGTH];
    guint8 salt[SHA256_DIGEST_LENGTH];  /* shadows the file-scope fixed salt */
    char hashed_passwd[SHA256_DIGEST_LENGTH*2+1];
    char salt_str[SHA256_DIGEST_LENGTH*2+1];

    if (!RAND_bytes (salt, sizeof(salt))) {
        /* NOTE(review): RAND_pseudo_bytes is deprecated and may produce a
         * non-cryptographic salt; consider failing hard instead. */
        ccnet_warning ("Failed to generate salt "
                       "with RAND_bytes(), use RAND_pseudo_bytes().\n");
        RAND_pseudo_bytes (salt, sizeof(salt));
    }

    PKCS5_PBKDF2_HMAC (passwd, strlen(passwd),
                       salt, sizeof(salt),
                       iterations,
                       EVP_sha256(),
                       sizeof(sha), sha);

    rawdata_to_hex (sha, hashed_passwd, SHA256_DIGEST_LENGTH);
    rawdata_to_hex (salt, salt_str, SHA256_DIGEST_LENGTH);

    /* Encode password hash related information into one string, similar to Django. */
    GString *buf = g_string_new (NULL);
    g_string_printf (buf, "PBKDF2SHA256$%d$%s$%s",
                     iterations, salt_str, hashed_passwd);
    *db_passwd = g_string_free (buf, FALSE);
}
static gboolean
validate_passwd_pbkdf2_sha256 (const char *passwd, const char *db_passwd)
{
    /* Validate @passwd against a stored string of the form
     * "PBKDF2SHA256$<iterations>$<salt-hex>$<hash-hex>".  Returns TRUE
     * on match.  The sentinel password "!" never validates. */
    char **tokens;
    char *salt_str, *hash;
    int iter;
    guint8 sha[SHA256_DIGEST_LENGTH];
    guint8 salt[SHA256_DIGEST_LENGTH];
    char hashed_passwd[SHA256_DIGEST_LENGTH*2+1];

    if (g_strcmp0 (db_passwd, "!") == 0)
        return FALSE;

    tokens = g_strsplit (db_passwd, "$", -1);
    if (!tokens || g_strv_length (tokens) != 4) {
        if (tokens)
            g_strfreev (tokens);
        /* Fix: typo "Invalide" in the log message. */
        ccnet_warning ("Invalid db passwd format %s.\n", db_passwd);
        return FALSE;
    }

    iter = atoi (tokens[1]);
    /* Fix: reject a malformed/non-positive iteration count instead of
     * feeding it to PBKDF2. */
    if (iter <= 0) {
        ccnet_warning ("Invalid iteration count in db passwd %s.\n", db_passwd);
        g_strfreev (tokens);
        return FALSE;
    }
    salt_str = tokens[2];
    hash = tokens[3];

    hex_to_rawdata (salt_str, salt, SHA256_DIGEST_LENGTH);

    PKCS5_PBKDF2_HMAC (passwd, strlen(passwd),
                       salt, sizeof(salt),
                       iter,
                       EVP_sha256(),
                       sizeof(sha), sha);
    rawdata_to_hex (sha, hashed_passwd, SHA256_DIGEST_LENGTH);

    gboolean ret = (strcmp (hash, hashed_passwd) == 0);

    g_strfreev (tokens);
    return ret;
}
static gboolean
validate_passwd (const char *passwd, const char *stored_passwd,
                 gboolean *need_upgrade)
{
    /* Dispatch on the stored hash format by its length:
     *  - 64 hex chars: legacy salted SHA-256  -> re-hash after login
     *  - 40 hex chars: legacy plain SHA-1     -> re-hash after login
     *  - otherwise:    PBKDF2SHA256$iter$salt$hash (current format)
     * *need_upgrade tells the caller to store an upgraded hash. */
    char computed[SHA256_DIGEST_LENGTH * 2 + 1];
    size_t stored_len = strlen (stored_passwd);

    *need_upgrade = FALSE;

    switch (stored_len) {
    case SHA256_DIGEST_LENGTH * 2:
        hash_password_salted (passwd, computed);
        *need_upgrade = TRUE;
        break;
    case SHA_DIGEST_LENGTH * 2:
        hash_password (passwd, computed);
        *need_upgrade = TRUE;
        break;
    default:
        return validate_passwd_pbkdf2_sha256 (passwd, stored_passwd);
    }

    return strcmp (computed, stored_passwd) == 0;
}
static int
update_user_passwd (CcnetUserManager *manager,
                    const char *email, const char *passwd)
{
    /* Re-hash @passwd with the current PBKDF2 scheme and store it for
     * @email (lower-cased for case-insensitive lookup).  Returns 0 on
     * success, negative on DB error. */
    char *db_passwd = NULL;
    char *email_lc;
    int rc;

    hash_password_pbkdf2_sha256 (passwd, manager->passwd_hash_iter,
                                 &db_passwd);

    /* convert email to lower case for case insensitive lookup. */
    email_lc = g_ascii_strdown (email, strlen(email));

    rc = seaf_db_statement_query (manager->priv->db,
                                  "UPDATE EmailUser SET passwd=? WHERE email=?",
                                  2, "string", db_passwd, "string", email_lc);
    g_free (db_passwd);
    g_free (email_lc);

    return rc < 0 ? rc : 0;
}
int
ccnet_user_manager_add_emailuser (CcnetUserManager *manager,
                                  const char *email,
                                  const char *passwd,
                                  int is_staff, int is_active)
{
    /* Insert a new user row.  The password is stored PBKDF2-hashed
     * unless it is the literal "!" (a non-loginable book-keeping
     * account).  Fails if the configured user limit would be exceeded.
     * Returns 0 on success, negative on error. */
    CcnetDB *db = manager->priv->db;
    gint64 now = get_current_time();
    char *db_passwd = NULL;
    int ret;

    if (!check_user_number (manager, FALSE)) {
        return -1;
    }

    /* A user with unhashed "!" as password cannot be logged in.
     * Such users are created for book keeping, such as users from
     * Shibboleth.
     */
    if (g_strcmp0 (passwd, "!") != 0)
        hash_password_pbkdf2_sha256 (passwd, manager->passwd_hash_iter,
                                     &db_passwd);
    else
        db_passwd = g_strdup(passwd);

    /* convert email to lower case for case insensitive lookup. */
    char *email_down = g_ascii_strdown (email, strlen(email));

    ret = seaf_db_statement_query (db,
                                   "INSERT INTO EmailUser(email, passwd, is_staff, "
                                   "is_active, ctime) VALUES (?, ?, ?, ?, ?)",
                                   5, "string", email_down, "string", db_passwd,
                                   "int", is_staff, "int", is_active, "int64", now);
    g_free (db_passwd);
    g_free (email_down);

    if (ret < 0)
        return ret;

    return 0;
}
int
ccnet_user_manager_remove_emailuser (CcnetUserManager *manager,
                                     const char *source,
                                     const char *email)
{
    /* Always drop the user's role entry; only "DB"-sourced users can
     * actually be deleted from EmailUser.  Returns the delete query's
     * result for "DB" users, -1 for any other source. */
    CcnetDB *db = manager->priv->db;

    seaf_db_statement_query (db,
                             "DELETE FROM UserRole WHERE email=?",
                             1, "string", email);

    if (strcmp (source, "DB") != 0)
        return -1;

    return seaf_db_statement_query (db,
                                    "DELETE FROM EmailUser WHERE email=?",
                                    1, "string", email);
}
static gboolean
get_password (CcnetDBRow *row, void *data)
{
    /* Row callback: copy column 0 (passwd) into *data.  Returns FALSE
     * to stop after the first row; caller frees the copy. */
    char **out = data;

    *out = g_strdup (seaf_db_row_get_column_text (row, 0));
    return FALSE;
}
int
ccnet_user_manager_validate_emailuser (CcnetUserManager *manager,
                                       const char *email,
                                       const char *passwd)
{
    /* Check @passwd for @email against the stored hash.  The lookup is
     * tried with the login id first, then with the lower-cased email.
     * Legacy hashes are transparently re-hashed with the current scheme
     * on successful login.  Returns 0 on success, -1 otherwise. */
    CcnetDB *db = manager->priv->db;
    int ret = -1;
    char *sql;
    char *email_down;
    char *login_id;
    char *stored_passwd = NULL;
    gboolean need_upgrade = FALSE;

    /* Users with password "!" are for internal book keeping only. */
    if (g_strcmp0 (passwd, "!") == 0)
        return -1;

    login_id = ccnet_user_manager_get_login_id (manager, email);
    if (!login_id) {
        ccnet_warning ("Failed to get login_id for %s\n", email);
        return -1;
    }

    sql = "SELECT passwd FROM EmailUser WHERE email=?";
    if (seaf_db_statement_foreach_row (db, sql,
                                       get_password, &stored_passwd,
                                       1, "string", login_id) > 0) {
        if (validate_passwd (passwd, stored_passwd, &need_upgrade)) {
            if (need_upgrade)
                update_user_passwd (manager, login_id, passwd);
            ret = 0;
        }
        goto out;
    }

    /* Fix: the original passed strlen(login_id) as the length of @email,
     * truncating the email whenever the login id is shorter.  -1 tells
     * g_ascii_strdown to use the whole NUL-terminated string. */
    email_down = g_ascii_strdown (email, -1);
    if (seaf_db_statement_foreach_row (db, sql,
                                       get_password, &stored_passwd,
                                       1, "string", email_down) > 0) {
        if (validate_passwd (passwd, stored_passwd, &need_upgrade)) {
            if (need_upgrade)
                update_user_passwd (manager, login_id, passwd);
            ret = 0;
        }
    }
    g_free (email_down);

out:
    g_free (login_id);
    g_free (stored_passwd);
    return ret;
}
static gboolean
get_emailuser_cb (CcnetDBRow *row, void *data)
{
    /* Row callback: build a CcnetEmailUser object from a joined
     * EmailUser/UserRole row and store it in *data.  Returns FALSE to
     * stop after the first row. */
    CcnetEmailUser **p_emailuser = data;

    int id = seaf_db_row_get_column_int (row, 0);
    const char *email = (const char *)seaf_db_row_get_column_text (row, 1);
    int is_staff = seaf_db_row_get_column_int (row, 2);
    int is_active = seaf_db_row_get_column_int (row, 3);
    gint64 ctime = seaf_db_row_get_column_int64 (row, 4);
    const char *password = seaf_db_row_get_column_text (row, 5);
    const char *reference_id = seaf_db_row_get_column_text (row, 6);
    const char *role = seaf_db_row_get_column_text (row, 7);
    /* Emails are treated case-insensitively; expose the lower-cased form. */
    char *email_l = g_ascii_strdown (email, -1);

    *p_emailuser = g_object_new (CCNET_TYPE_EMAIL_USER,
                                 "id", id,
                                 "email", email_l,
                                 "is_staff", is_staff,
                                 "is_active", is_active,
                                 "ctime", ctime,
                                 "source", "DB",
                                 "password", password,
                                 "reference_id", reference_id,
                                 "role", role ? role : "",
                                 NULL);
    g_free (email_l);

    return FALSE;
}
static char*
ccnet_user_manager_get_role_emailuser (CcnetUserManager *manager,
const char* email);
/* Look up a user by email in EmailUser (joined with UserRole for the
 * role column).  The lookup first uses the email exactly as given; if
 * nothing matches, it is retried with the ASCII-lower-cased email.
 * Returns a new CcnetEmailUser (caller unrefs), NULL if not found, or
 * NULL with @error set on a database error.  @import is currently
 * unused in the DB backend. */
static CcnetEmailUser*
get_emailuser (CcnetUserManager *manager,
               const char *email,
               gboolean import,
               GError **error)
{
    CcnetDB *db = manager->priv->db;
    CcnetEmailUser *user = NULL;
    char *lower = NULL;
    int rc;
    const char *sql =
        "SELECT e.id, e.email, is_staff, is_active, ctime, passwd, reference_id, role "
        " FROM EmailUser e LEFT JOIN UserRole ON e.email = UserRole.email "
        " WHERE e.email=?";

    rc = seaf_db_statement_foreach_row (db, sql, get_emailuser_cb, &user,
                                        1, "string", email);
    if (rc > 0)
        return user;
    if (rc < 0) {
        if (error)
            g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Database error");
        return NULL;
    }

    /* No verbatim match; retry with the lower-cased form. */
    lower = g_ascii_strdown (email, strlen (email));
    rc = seaf_db_statement_foreach_row (db, sql, get_emailuser_cb, &user,
                                        1, "string", lower);
    g_free (lower);

    if (rc > 0)
        return user;
    if (rc < 0) {
        if (error)
            g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, "Database error");
        return NULL;
    }

    return NULL;
}
/* Public wrapper: look up a user by email without attempting an import
 * (passes import=FALSE to get_emailuser).  Caller unrefs the result. */
CcnetEmailUser*
ccnet_user_manager_get_emailuser (CcnetUserManager *manager,
                                  const char *email,
                                  GError **error)
{
    return get_emailuser (manager, email, FALSE, error);
}
/* Public wrapper: look up a user by email with import enabled (passes
 * import=TRUE to get_emailuser).  Caller unrefs the result. */
CcnetEmailUser*
ccnet_user_manager_get_emailuser_with_import (CcnetUserManager *manager,
                                              const char *email,
                                              GError **error)
{
    return get_emailuser (manager, email, TRUE, error);
}
/* Fetch a user record by its numeric EmailUser.id.
 * Returns a new CcnetEmailUser (caller unrefs), or NULL when the id
 * does not exist or the query fails. */
CcnetEmailUser*
ccnet_user_manager_get_emailuser_by_id (CcnetUserManager *manager, int id)
{
    CcnetEmailUser *user = NULL;
    const char *query =
        "SELECT e.id, e.email, is_staff, is_active, ctime, passwd, reference_id, role "
        " FROM EmailUser e LEFT JOIN UserRole ON e.email = UserRole.email "
        " WHERE e.id=?";

    int rc = seaf_db_statement_foreach_row (manager->priv->db, query,
                                            get_emailuser_cb, &user,
                                            1, "int", id);
    if (rc < 0)
        return NULL;

    return user;
}
/* Row callback used by the list/search queries: build a CcnetEmailUser
 * from one row and prepend it to the GList* passed as @data.
 * Expected columns: id, email, is_staff, is_active, ctime, role, passwd.
 * Returns TRUE so every row is consumed. */
static gboolean
get_emailusers_cb (CcnetDBRow *row, void *data)
{
    GList **user_list = data;

    int user_id = seaf_db_row_get_column_int (row, 0);
    const char *email_col = (const char *)seaf_db_row_get_column_text (row, 1);
    int staff_flag = seaf_db_row_get_column_int (row, 2);
    int active_flag = seaf_db_row_get_column_int (row, 3);
    gint64 create_time = seaf_db_row_get_column_int64 (row, 4);
    const char *role_col = (const char *)seaf_db_row_get_column_text (row, 5);
    const char *passwd_col = seaf_db_row_get_column_text (row, 6);

    /* Normalize the email to ASCII lower case before exposing it. */
    char *lower_email = g_ascii_strdown (email_col, -1);

    CcnetEmailUser *user = g_object_new (CCNET_TYPE_EMAIL_USER,
                                         "id", user_id,
                                         "email", lower_email,
                                         "is_staff", staff_flag,
                                         "is_active", active_flag,
                                         "ctime", create_time,
                                         "role", role_col ? role_col : "",
                                         "source", "DB",
                                         "password", passwd_col,
                                         NULL);
    g_free (lower_email);

    /* Prepend for O(1); callers reverse the finished list. */
    *user_list = g_list_prepend (*user_list, user);

    return TRUE;
}
/* List users from the DB backend, optionally filtered by activation
 * status and paginated.
 * @source: only "DB" is supported; anything else returns NULL.
 * @start/@limit: pagination; pass -1/-1 for all users.
 * @status: "", "active", or "inactive".
 * Returns a list of CcnetEmailUser (caller unrefs each element and
 * frees the list), or NULL on error. */
GList*
ccnet_user_manager_get_emailusers (CcnetUserManager *manager,
                                   const char *source,
                                   int start, int limit,
                                   const char *status)
{
    CcnetDB *db = manager->priv->db;
    const char *status_condition = "";
    char *sql = NULL;
    GList *ret = NULL;
    int rc;

    if (g_strcmp0 (source, "DB") != 0)
        return NULL;

    /* The status filter is appended to an existing WHERE clause, so it
     * must start with AND.  The previous code prefixed a second WHERE
     * ("... %s WHERE t1.email NOT LIKE ..."), producing invalid SQL
     * whenever a status filter was requested. */
    if (g_strcmp0 (status, "active") == 0)
        status_condition = "AND t1.is_active = 1";
    else if (g_strcmp0 (status, "inactive") == 0)
        status_condition = "AND t1.is_active = 0";

    if (start == -1 && limit == -1) {
        sql = g_strdup_printf ("SELECT t1.id, t1.email, "
                               "t1.is_staff, t1.is_active, t1.ctime, "
                               "t2.role, t1.passwd FROM EmailUser t1 "
                               "LEFT JOIN UserRole t2 "
                               "ON t1.email = t2.email "
                               "WHERE t1.email NOT LIKE '%%@seafile_group' %s",
                               status_condition);
        rc = seaf_db_statement_foreach_row (db,
                                            sql,
                                            get_emailusers_cb, &ret,
                                            0);
        g_free (sql);
    } else {
        sql = g_strdup_printf ("SELECT t1.id, t1.email, "
                               "t1.is_staff, t1.is_active, t1.ctime, "
                               "t2.role, t1.passwd FROM EmailUser t1 "
                               "LEFT JOIN UserRole t2 "
                               "ON t1.email = t2.email "
                               "WHERE t1.email NOT LIKE '%%@seafile_group' %s "
                               "ORDER BY t1.id LIMIT ? OFFSET ?",
                               status_condition);
        rc = seaf_db_statement_foreach_row (db,
                                            sql,
                                            get_emailusers_cb, &ret,
                                            2, "int", limit, "int", start);
        g_free (sql);
    }

    if (rc < 0) {
        /* Release any partially collected users. */
        while (ret != NULL) {
            g_object_unref (ret->data);
            ret = g_list_delete_link (ret, ret);
        }
        return NULL;
    }

    /* Rows were prepended; restore ascending order. */
    return g_list_reverse (ret);
}
/* Search DB users whose email contains @keyword (SQL LIKE), excluding
 * internal "@seafile_group" addresses.
 * @start/@limit: pagination; pass -1/-1 for all matches.
 * Returns a list of CcnetEmailUser (caller unrefs each element), or
 * NULL when @source is not "DB" or on a DB error. */
GList*
ccnet_user_manager_search_emailusers (CcnetUserManager *manager,
                                      const char *source,
                                      const char *keyword,
                                      int start, int limit)
{
    CcnetDB *db = manager->priv->db;
    GList *ret = NULL;
    int rc;
    char *db_patt;

    /* g_strcmp0 is NULL-safe (plain strcmp crashed on a NULL source);
     * check before allocating the pattern so the non-DB path does no
     * needless work. */
    if (g_strcmp0 (source, "DB") != 0)
        return NULL;

    /* Substring match: %keyword% */
    db_patt = g_strdup_printf ("%%%s%%", keyword);

    if (start == -1 && limit == -1)
        rc = seaf_db_statement_foreach_row (db,
                                            "SELECT t1.id, t1.email, "
                                            "t1.is_staff, t1.is_active, t1.ctime, "
                                            "t2.role, t1.passwd FROM EmailUser t1 "
                                            "LEFT JOIN UserRole t2 "
                                            "ON t1.email = t2.email "
                                            "WHERE t1.Email LIKE ? "
                                            "AND t1.email NOT LIKE '%%@seafile_group' "
                                            "ORDER BY t1.id",
                                            get_emailusers_cb, &ret,
                                            1, "string", db_patt);
    else
        rc = seaf_db_statement_foreach_row (db,
                                            "SELECT t1.id, t1.email, "
                                            "t1.is_staff, t1.is_active, t1.ctime, "
                                            "t2.role, t1.passwd FROM EmailUser t1 "
                                            "LEFT JOIN UserRole t2 "
                                            "ON t1.email = t2.email "
                                            "WHERE t1.Email LIKE ? "
                                            "AND t1.email NOT LIKE '%%@seafile_group' "
                                            "ORDER BY t1.id LIMIT ? OFFSET ?",
                                            get_emailusers_cb, &ret,
                                            3, "string", db_patt,
                                            "int", limit, "int", start);
    g_free (db_patt);

    if (rc < 0) {
        /* Release any partially collected users. */
        while (ret != NULL) {
            g_object_unref (ret->data);
            ret = g_list_delete_link (ret, ret);
        }
        return NULL;
    }

    /* Rows were prepended; restore ascending order. */
    return g_list_reverse (ret);
}
/* Count active users in the DB backend.
 * Returns the count, or -1 when @source is not "DB" or on DB error. */
gint64
ccnet_user_manager_count_emailusers (CcnetUserManager *manager, const char *source)
{
    gint64 count;

    if (g_strcmp0 (source, "DB") != 0)
        return -1;

    count = seaf_db_get_int64 (manager->priv->db,
                               "SELECT COUNT(id) FROM EmailUser WHERE is_active = 1");
    return (count < 0) ? -1 : count;
}
/* Count inactive users in the DB backend.
 * Returns the count, or -1 when @source is not "DB" or on DB error. */
gint64
ccnet_user_manager_count_inactive_emailusers (CcnetUserManager *manager, const char *source)
{
    gint64 count;

    if (g_strcmp0 (source, "DB") != 0)
        return -1;

    count = seaf_db_get_int64 (manager->priv->db,
                               "SELECT COUNT(id) FROM EmailUser WHERE is_active = 0");
    return (count < 0) ? -1 : count;
}
#if 0
GList*
ccnet_user_manager_filter_emailusers_by_emails(CcnetUserManager *manager,
const char *emails)
{
CcnetDB *db = manager->priv->db;
char *copy = g_strdup (emails), *saveptr;
GList *ret = NULL;
GString *sql = g_string_new(NULL);
g_string_append (sql, "SELECT * FROM EmailUser WHERE Email IN (");
char *name = strtok_r (copy, ", ", &saveptr);
while (name != NULL) {
g_string_append_printf (sql, "'%s',", name);
name = strtok_r (NULL, ", ", &saveptr);
}
g_string_erase (sql, sql->len-1, 1); /* remove last "," */
g_string_append (sql, ")");
if (seaf_db_foreach_selected_row (db, sql->str, get_emailusers_cb,
&ret) < 0) {
while (ret != NULL) {
g_object_unref (ret->data);
ret = g_list_delete_link (ret, ret);
}
return NULL;
}
g_free (copy);
g_string_free (sql, TRUE);
return g_list_reverse (ret);
}
#endif
/* Update a DB user's staff/active flags and, unless @passwd is the "!"
 * sentinel, the password (stored as a PBKDF2-SHA256 hash).
 * Returns the underlying query result, or -1 when @source is not "DB"
 * or the active-user limit would be exceeded. */
int
ccnet_user_manager_update_emailuser (CcnetUserManager *manager,
                                     const char *source,
                                     int id, const char* passwd,
                                     int is_staff, int is_active)
{
    CcnetDB* db = manager->priv->db;
    char *db_passwd = NULL;
    int rc;

    /* in case set user user1 to inactive, then add another active user user2,
     * if current user num already the max user num,
     * then reset user1 to active should fail */
    if (is_active && !check_user_number (manager, FALSE)) {
        return -1;
    }

    /* g_strcmp0 is NULL-safe, matching the sibling functions. */
    if (g_strcmp0 (source, "DB") != 0)
        return -1;

    if (g_strcmp0 (passwd, "!") == 0) {
        /* "!" is the sentinel for "leave the password unchanged". */
        return seaf_db_statement_query (db, "UPDATE EmailUser SET is_staff=?, "
                                        "is_active=? WHERE id=?",
                                        3, "int", is_staff, "int", is_active,
                                        "int", id);
    }

    hash_password_pbkdf2_sha256 (passwd, manager->passwd_hash_iter, &db_passwd);
    rc = seaf_db_statement_query (db, "UPDATE EmailUser SET passwd=?, "
                                  "is_staff=?, is_active=? WHERE id=?",
                                  4, "string", db_passwd, "int", is_staff,
                                  "int", is_active, "int", id);
    /* The hash string is allocated by hash_password_pbkdf2_sha256();
     * the previous code returned without freeing it (leak on every
     * password change). */
    g_free (db_passwd);
    return rc;
}
/* Row callback: duplicate the first column (the role string) into the
 * char** passed as @data.  Returns FALSE: only one row is consumed. */
static gboolean
get_role_emailuser_cb (CcnetDBRow *row, void *data)
{
    char **role_out = data;
    *role_out = g_strdup (seaf_db_row_get_column_text (row, 0));
    return FALSE;
}
/* Fetch the UserRole row for @email.
 * Returns a newly allocated role string (caller frees), or NULL when
 * no role is recorded or the query fails. */
static char*
ccnet_user_manager_get_role_emailuser (CcnetUserManager *manager,
                                       const char* email)
{
    char *role = NULL;
    int rc = seaf_db_statement_foreach_row (manager->priv->db,
                                            "SELECT role FROM UserRole WHERE email=?",
                                            get_role_emailuser_cb, &role,
                                            1, "string", email);
    if (rc > 0)
        return role;

    return NULL;
}
/* Set @email's role: INSERT a UserRole row if none exists, otherwise
 * UPDATE the existing one.  Returns the query result (0 on success). */
int
ccnet_user_manager_update_role_emailuser (CcnetUserManager *manager,
                                          const char* email, const char* role)
{
    CcnetDB *db = manager->priv->db;
    char *existing = ccnet_user_manager_get_role_emailuser (manager, email);

    if (!existing)
        return seaf_db_statement_query (db, "INSERT INTO UserRole(role, email)"
                                        " VALUES (?, ?)",
                                        2, "string", role, "string", email);

    g_free (existing);
    return seaf_db_statement_query (db, "UPDATE UserRole SET role=? "
                                    "WHERE email=?",
                                    2, "string", role, "string", email);
}
/* List all staff (admin) users, excluding internal "@seafile_group"
 * addresses.  Returns a list of CcnetEmailUser (caller unrefs each),
 * or NULL on DB error. */
GList*
ccnet_user_manager_get_superusers(CcnetUserManager *manager)
{
    GList *users = NULL;
    /* Single '%' in the LIKE pattern: the original routed this literal
     * through snprintf, which collapsed "%%" to "%". */
    const char *sql =
        "SELECT t1.id, t1.email, "
        "t1.is_staff, t1.is_active, t1.ctime, "
        "t2.role, t1.passwd FROM EmailUser t1 "
        "LEFT JOIN UserRole t2 "
        "ON t1.email = t2.email "
        "WHERE is_staff = 1 AND t1.email NOT LIKE '%@seafile_group';";

    if (seaf_db_foreach_selected_row (manager->priv->db, sql,
                                      get_emailusers_cb, &users) < 0) {
        while (users != NULL) {
            g_object_unref (users->data);
            users = g_list_delete_link (users, users);
        }
        return NULL;
    }

    return g_list_reverse (users);
}
/* Map a primary id (email) to the id used for login.  In this backend
 * they are identical, so this simply returns a copy; the caller owns
 * the returned string and must g_free() it. */
char *
ccnet_user_manager_get_login_id (CcnetUserManager *manager, const char *primary_id)
{
    return g_strdup (primary_id);
}
/* Look up up to 20 users whose emails are given as a JSON array in
 * @user_list.  Returns a list of CcnetEmailUser (caller unrefs each),
 * NULL with @error set on bad input, or NULL for a non-"DB" source. */
GList *
ccnet_user_manager_get_emailusers_in_list (CcnetUserManager *manager,
                                           const char *source,
                                           const char *user_list,
                                           GError **error)
{
    int i;
    json_t *j_array = NULL, *j_obj;
    json_error_t j_error;
    GList *ret = NULL;
    const char *args[20];

    j_array = json_loadb (user_list, strlen(user_list), 0, &j_error);
    if (!j_array) {
        g_set_error (error, CCNET_DOMAIN, 0, "Bad args.");
        return NULL;
    }

    /* Query 20 users at most. */
    size_t user_num = json_array_size (j_array);
    if (user_num > 20) {
        g_set_error (error, CCNET_DOMAIN, 0, "Number of users exceeds 20.");
        json_decref (j_array);
        return NULL;
    }

    GString *sql = g_string_new ("");

    /* The statement always binds exactly 20 parameters; pad unused
     * slots with "".  json_string_value() returns NULL for non-string
     * array entries — map those to "" too, so a malformed list can
     * never bind a NULL string parameter. */
    for (i = 0; i < 20; i++) {
        const char *username = NULL;
        if ((size_t)i < user_num) {
            j_obj = json_array_get (j_array, i);
            username = json_string_value (j_obj);
        }
        args[i] = username ? username : "";
    }

    /* NULL-safe comparison, consistent with the sibling functions. */
    if (g_strcmp0 (source, "DB") != 0)
        goto out;

    g_string_printf (sql, "SELECT e.id, e.email, is_staff, is_active, ctime, "
                     "role, passwd FROM EmailUser e "
                     "LEFT JOIN UserRole r ON e.email = r.email "
                     "WHERE e.email IN (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)");
    if (seaf_db_statement_foreach_row (manager->priv->db, sql->str, get_emailusers_cb, &ret, 20,
                                       "string", args[0], "string", args[1], "string", args[2],
                                       "string", args[3], "string", args[4], "string", args[5],
                                       "string", args[6], "string", args[7], "string", args[8],
                                       "string", args[9], "string", args[10], "string", args[11],
                                       "string", args[12], "string", args[13], "string", args[14],
                                       "string", args[15], "string", args[16], "string", args[17],
                                       "string", args[18], "string", args[19]) < 0)
        ccnet_warning("Failed to get users in list %s.\n", user_list);

out:
    json_decref (j_array);
    g_string_free (sql, TRUE);
    return ret;
}
/* Rename a user: rewrite every table that references the old email.
 * Stops and returns -1 at the first failing step (earlier steps are
 * NOT rolled back); returns 0 when all steps succeed.  All tables live
 * in seaf->db except EmailUser, which uses the manager's own db. */
int
ccnet_user_manager_update_emailuser_id (CcnetUserManager *manager,
                                        const char *old_email,
                                        const char *new_email,
                                        GError **error)
{
    /* 1. repo ownership */
    if (seaf_db_statement_query (seaf->db,
                                 "UPDATE RepoOwner SET owner_id=? WHERE owner_id=?",
                                 2, "string", new_email,
                                 "string", old_email) < 0) {
        ccnet_warning ("Failed to update repo owner\n");
        return -1;
    }

    /* 2. shares, both directions */
    if (seaf_db_statement_query (seaf->db,
                                 "UPDATE SharedRepo SET from_email=? WHERE from_email=?",
                                 2, "string", new_email,
                                 "string", old_email) < 0) {
        ccnet_warning ("Failed to update from_email\n");
        return -1;
    }
    if (seaf_db_statement_query (seaf->db,
                                 "UPDATE SharedRepo SET to_email=? WHERE to_email=?",
                                 2, "string", new_email,
                                 "string", old_email) < 0) {
        ccnet_warning ("Failed to update to_email\n");
        return -1;
    }

    /* 3. group membership */
    if (ccnet_group_manager_update_group_user (seaf->group_mgr,
                                               old_email, new_email) < 0) {
        ccnet_warning ("Failed to update group member\n");
        return -1;
    }

    /* 4. sync tokens */
    if (seaf_db_statement_query (seaf->db,
                                 "UPDATE RepoUserToken SET email=? WHERE email=?",
                                 2, "string", new_email,
                                 "string", old_email) < 0) {
        ccnet_warning ("Failed to update repo user token\n");
        return -1;
    }

    /* 5. per-folder permissions */
    if (seaf_db_statement_query (seaf->db,
                                 "UPDATE FolderUserPerm SET user=? WHERE user=?",
                                 2, "string", new_email,
                                 "string", old_email) < 0) {
        ccnet_warning ("Failed to update user folder permission\n");
        return -1;
    }

    /* 6. the account itself (note: manager's db, not seaf->db) */
    if (seaf_db_statement_query (manager->priv->db,
                                 "UPDATE EmailUser SET email=? WHERE email=?",
                                 2, "string", new_email,
                                 "string", old_email) < 0) {
        ccnet_warning ("Failed to update email user\n");
        return -1;
    }

    /* 7. quota */
    if (seaf_db_statement_query (seaf->db,
                                 "UPDATE UserQuota SET user=? WHERE user=?",
                                 2, "string", new_email,
                                 "string", old_email) < 0) {
        ccnet_warning ("Failed to update user quota\n");
        return -1;
    }

    return 0;
}
================================================
FILE: common/user-mgr.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef CCNET_USER_MGR_H
#define CCNET_USER_MGR_H
#include
#include
#define CCNET_TYPE_USER_MANAGER (ccnet_user_manager_get_type ())
#define CCNET_USER_MANAGER(obj) (G_TYPE_CHECK_INSTANCE_CAST ((obj), CCNET_TYPE_USER_MANAGER, CcnetUserManager))
#define CCNET_IS_USER_MANAGER(obj) (G_TYPE_CHECK_INSTANCE_TYPE ((obj), CCNET_TYPE_USER_MANAGER))
#define CCNET_USER_MANAGER_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST ((klass), CCNET_TYPE_USER_MANAGER, CcnetUserManagerClass))
#define CCNET_IS_USER_MANAGER_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE ((klass), CCNET_TYPE_USER_MANAGER))
#define CCNET_USER_MANAGER_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj), CCNET_TYPE_USER_MANAGER, CcnetUserManagerClass))
typedef struct _SeafileSession SeafileSession;
typedef struct _CcnetUserManager CcnetUserManager;
typedef struct _CcnetUserManagerClass CcnetUserManagerClass;
typedef struct CcnetUserManagerPriv CcnetUserManagerPriv;
/* Manager for user accounts (EmailUser/UserRole tables, optionally
 * backed by LDAP when built with HAVE_LDAP). */
struct _CcnetUserManager
{
    GObject parent_instance;

    SeafileSession *session;    /* owning session */

    char *userdb_path;          /* path of the user database */
    GHashTable *user_hash;

#ifdef HAVE_LDAP
    /* LDAP related */
    gboolean use_ldap;
    char *ldap_host;
#ifdef WIN32
    gboolean use_ssl;
#endif
    char **base_list; /* base DN from where all users can be reached */
    char *filter; /* Additional search filter */
    char *user_dn; /* DN of the admin user */
    char *password; /* password for admin user */
    char *login_attr; /* attribute name used for login */
    gboolean follow_referrals; /* Follow referrals returned by the server. */
#endif

    int passwd_hash_iter;       /* PBKDF2 iteration count for password hashing */

    CcnetUserManagerPriv *priv;
};
struct _CcnetUserManagerClass
{
GObjectClass parent_class;
};
GType ccnet_user_manager_get_type (void);
CcnetUserManager* ccnet_user_manager_new (SeafileSession *);
int ccnet_user_manager_prepare (CcnetUserManager *manager);
void ccnet_user_manager_free (CcnetUserManager *manager);
void ccnet_user_manager_start (CcnetUserManager *manager);
void
ccnet_user_manager_set_max_users (CcnetUserManager *manager, gint64 max_users);
int
ccnet_user_manager_add_emailuser (CcnetUserManager *manager,
const char *email,
const char *encry_passwd,
int is_staff, int is_active);
int
ccnet_user_manager_remove_emailuser (CcnetUserManager *manager,
const char *source,
const char *email);
int
ccnet_user_manager_validate_emailuser (CcnetUserManager *manager,
const char *email,
const char *passwd);
CcnetEmailUser*
ccnet_user_manager_get_emailuser (CcnetUserManager *manager, const char *email, GError **error);
CcnetEmailUser*
ccnet_user_manager_get_emailuser_with_import (CcnetUserManager *manager,
const char *email,
GError **error);
CcnetEmailUser*
ccnet_user_manager_get_emailuser_by_id (CcnetUserManager *manager, int id);
/*
* @source: "DB" or "LDAP".
* @status: "", "active", or "inactive". returns all users when this argument is "".
*/
GList*
ccnet_user_manager_get_emailusers (CcnetUserManager *manager,
const char *source,
int start, int limit,
const char *status);
GList*
ccnet_user_manager_search_emailusers (CcnetUserManager *manager,
const char *source,
const char *keyword,
int start, int limit);
gint64
ccnet_user_manager_count_emailusers (CcnetUserManager *manager, const char *source);
gint64
ccnet_user_manager_count_inactive_emailusers (CcnetUserManager *manager, const char *source);
GList*
ccnet_user_manager_filter_emailusers_by_emails(CcnetUserManager *manager,
const char *emails);
int
ccnet_user_manager_update_emailuser (CcnetUserManager *manager,
const char *source,
int id, const char* passwd,
int is_staff, int is_active);
int
ccnet_user_manager_update_role_emailuser (CcnetUserManager *manager,
const char* email, const char* role);
GList*
ccnet_user_manager_get_superusers(CcnetUserManager *manager);
/* Remove one specific peer-id binding to an email */
char *
ccnet_user_manager_get_login_id (CcnetUserManager *manager,
const char *primary_id);
GList *
ccnet_user_manager_get_emailusers_in_list (CcnetUserManager *manager,
const char *source,
const char *user_list,
GError **error);
int
ccnet_user_manager_update_emailuser_id (CcnetUserManager *manager,
const char *old_email,
const char *new_email,
GError **error);
#endif
================================================
FILE: common/vc-common.c
================================================
#include "common.h"
#include "seafile-session.h"
#include "vc-common.h"
#include "log.h"
#include "seafile-error.h"
static GList *
merge_bases_many (SeafCommit *one, int n, SeafCommit **twos);
/* GCompareDataFunc: order commits newest-first by creation time.
 * The ctime fields are gint64; returning their raw difference as a
 * gint truncates it, which can flip the sign for large gaps, so
 * compare explicitly instead. */
static gint
compare_commit_by_time (gconstpointer a, gconstpointer b, gpointer unused)
{
    const SeafCommit *commit_a = a;
    const SeafCommit *commit_b = b;

    /* Latest commit comes first in the list. */
    if (commit_b->ctime > commit_a->ctime)
        return 1;
    if (commit_b->ctime < commit_a->ctime)
        return -1;
    return 0;
}
/* GCompareFunc: two commits are considered equal iff their ids match. */
static gint
compare_commit (gconstpointer a, gconstpointer b)
{
    const SeafCommit *x = a;
    const SeafCommit *y = b;
    return strcmp (x->commit_id, y->commit_id);
}
/* Traverse callback: record this commit's id in the hash table used as
 * a set (key == value; the table owns the string via its key-destroy
 * function).  Always returns TRUE to continue traversal. */
static gboolean
add_to_commit_hash (SeafCommit *commit, void *vhash, gboolean *stop)
{
    GHashTable *id_set = vhash;
    char *id_copy = g_strdup (commit->commit_id);
    g_hash_table_replace (id_set, id_copy, id_copy);
    return TRUE;
}
/* Build a set containing the id of every commit reachable from @head.
 * Returns the set (caller destroys), or NULL when the commit tree
 * cannot be fully traversed. */
static GHashTable *
commit_tree_to_hash (SeafCommit *head)
{
    GHashTable *id_set = g_hash_table_new_full (g_str_hash, g_str_equal,
                                                g_free, NULL);

    if (!seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr,
                                                   head->repo_id,
                                                   head->version,
                                                   head->commit_id,
                                                   add_to_commit_hash,
                                                   id_set, FALSE)) {
        g_hash_table_destroy (id_set);
        return NULL;
    }

    return id_set;
}
/* Reduce @commits (a list of ref'd SeafCommit*) to those with no
 * fast-forward relationship to each other; for each related pair the
 * older commit is dropped (unref'd).  Takes ownership of the list.
 * Returns the survivors sorted newest-first, or NULL on failure. */
static GList *
get_independent_commits (GList *commits)
{
    SeafCommit **rslt;
    GList *list, *result;
    int cnt, i, j;
    SeafCommit *c;

    g_debug ("Get independent commits.\n");

    cnt = g_list_length (commits);

    rslt = calloc(cnt, sizeof(*rslt));
    if (!rslt) {
        /* Allocation failure: the original dereferenced NULL below.
         * Release the input refs and fail cleanly instead. */
        for (list = commits; list; list = list->next)
            seaf_commit_unref ((SeafCommit *)list->data);
        g_list_free (commits);
        return NULL;
    }
    for (list = commits, i = 0; list; list = list->next)
        rslt[i++] = list->data;
    g_list_free (commits);

    for (i = 0; i < cnt - 1; i++) {
        for (j = i+1; j < cnt; j++) {
            if (!rslt[i] || !rslt[j])
                continue;
            result = merge_bases_many(rslt[i], 1, &rslt[j]);
            for (list = result; list; list = list->next) {
                c = list->data;
                /* If two commits have fast-forward relationship,
                 * drop the older one.
                 */
                if (strcmp (rslt[i]->commit_id, c->commit_id) == 0) {
                    seaf_commit_unref (rslt[i]);
                    rslt[i] = NULL;
                }
                if (strcmp (rslt[j]->commit_id, c->commit_id) == 0) {
                    seaf_commit_unref (rslt[j]);
                    rslt[j] = NULL;
                }
                seaf_commit_unref (c);
            }
        }
    }

    /* Surviving ones in rslt[] are the independent results */
    result = NULL;
    for (i = 0; i < cnt; i++) {
        if (rslt[i])
            result = g_list_insert_sorted_with_data (result, rslt[i],
                                                     compare_commit_by_time,
                                                     NULL);
    }
    free(rslt);
    return result;
}
/* State shared by get_merge_bases() across a commit-tree traversal. */
typedef struct {
    GList *result;           /* common ancestors found so far, newest first */
    GHashTable *commit_hash; /* set of commit ids reachable from "one" */
} MergeTraverseData;
/* Traverse callback over a candidate's history.  If @commit also
 * appears in data->commit_hash (i.e. it is reachable from "one"), it is
 * a common ancestor: record it once (ref'd, sorted newest-first) and
 * set *stop so this branch of ancestry is not descended further; the
 * traversal itself continues on other branches. */
static gboolean
get_merge_bases (SeafCommit *commit, void *vdata, gboolean *stop)
{
    MergeTraverseData *data = vdata;

    /* Found a common ancestor.
     * Don't traverse its parents.
     */
    if (g_hash_table_lookup (data->commit_hash, commit->commit_id)) {
        if (!g_list_find_custom (data->result, commit, compare_commit)) {
            data->result = g_list_insert_sorted_with_data (data->result, commit,
                                                           compare_commit_by_time,
                                                           NULL);
            seaf_commit_ref (commit);
        }
        *stop = TRUE;
    }

    return TRUE;
}
/*
 * Merge "one" with commits in "twos".
 * The ancestors returned may not be ancestors for all the input commits.
 * They are common ancestors for one and some commits in twos array.
 *
 * Returns a list of ref'd SeafCommit* (caller unrefs), or NULL on
 * traversal failure.
 */
static GList *
merge_bases_many (SeafCommit *one, int n, SeafCommit **twos)
{
    GHashTable *commit_hash;
    GList *result = NULL;
    SeafCommit *commit;
    int i;
    MergeTraverseData data;
    gboolean res;

    /* If "one" itself is among the candidates, it is trivially a base.
     * NOTE(review): "one" is returned here without taking an extra
     * ref, unlike the commits collected by get_merge_bases — confirm
     * callers account for this. */
    for (i = 0; i < n; i++) {
        if (one == twos[i])
            return g_list_append (result, one);
    }

    /* First construct a hash table of all commit ids rooted at one. */
    commit_hash = commit_tree_to_hash (one);
    if (!commit_hash) {
        g_warning ("Failed to load commit hash.\n");
        return NULL;
    }

    data.commit_hash = commit_hash;
    data.result = NULL;

    /* Walk each candidate's history; get_merge_bases() collects (and
     * refs) the first commits that also appear in one's history. */
    for (i = 0; i < n; i++) {
        res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr,
                                                        twos[i]->repo_id,
                                                        twos[i]->version,
                                                        twos[i]->commit_id,
                                                        get_merge_bases,
                                                        &data, FALSE);
        if (!res)
            goto fail;
    }

    g_hash_table_destroy (commit_hash);
    result = data.result;

    if (!result || !result->next)
        return result;

    /* There are more than one. Try to find out independent ones. */
    result = get_independent_commits (result);

    return result;

fail:
    /* Traversal failed: drop the refs taken so far. */
    result = data.result;
    while (result) {
        commit = result->data;
        seaf_commit_unref (commit);
        result = g_list_delete_link (result, result);
    }
    g_hash_table_destroy (commit_hash);
    return NULL;
}
/*
 * Returns the common ancestor of two branches.
 * Any two commits should have a common ancestor.
 * So returning NULL indicates an error, e.g. a corrupt commit.
 */
SeafCommit *
get_merge_base (SeafCommit *head, SeafCommit *remote)
{
    GList *result, *iter;
    SeafCommit *one, **twos;
    int n, i;
    SeafCommit *ret = NULL;

    one = head;
    /* NOTE(review): calloc results here and in the loop below are not
     * checked for NULL — an OOM would crash on the next line. */
    twos = (SeafCommit **) calloc (1, sizeof(SeafCommit *));
    twos[0] = remote;
    n = 1;
    result = merge_bases_many (one, n, twos);
    free (twos);
    if (!result || !result->next)
        goto done;

    /*
     * More than one common ancestors.
     * Loop until the oldest common ancestor is found.
     */
    while (1) {
        /* Merge the newest candidate with all the remaining ones. */
        n = g_list_length (result) - 1;
        one = result->data;
        twos = calloc (n, sizeof(SeafCommit *));
        for (iter = result->next, i = 0; i < n; iter = iter->next, i++) {
            twos[i] = iter->data;
        }
        g_list_free (result);

        result = merge_bases_many (one, n, twos);
        free (twos);
        if (!result || !result->next)
            break;
    }

done:
    if (result)
        ret = result->data;
    g_list_free (result);

    return ret;
}
/*
 * Returns TRUE if src_head is ahead of dst_head, i.e. dst_head can be
 * fast-forwarded to src_head.
 */
gboolean
is_fast_forward (const char *repo_id, int version,
                 const char *src_head, const char *dst_head)
{
    VCCompareResult cmp = vc_compare_commits (repo_id, version,
                                              src_head, dst_head);
    return cmp == VC_FAST_FORWARD;
}
/* Compare commits @c1 and @c2 via their merge base.
 * VC_UP_TO_DATE   — same id, or c2 is ahead of c1;
 * VC_FAST_FORWARD — c1 is ahead of c2;
 * VC_INDEPENDENT  — unrelated, or either commit cannot be loaded. */
VCCompareResult
vc_compare_commits (const char *repo_id, int version,
                    const char *c1, const char *c2)
{
    SeafCommit *commit1, *commit2, *base;
    VCCompareResult result = VC_INDEPENDENT;

    /* Treat the same as up-to-date. */
    if (strcmp (c1, c2) == 0)
        return VC_UP_TO_DATE;

    commit1 = seaf_commit_manager_get_commit (seaf->commit_mgr, repo_id, version, c1);
    if (!commit1)
        return VC_INDEPENDENT;

    commit2 = seaf_commit_manager_get_commit (seaf->commit_mgr, repo_id, version, c2);
    if (!commit2) {
        seaf_commit_unref (commit1);
        return VC_INDEPENDENT;
    }

    base = get_merge_base (commit1, commit2);

    if (base) {
        if (strcmp (base->commit_id, commit1->commit_id) == 0)
            result = VC_UP_TO_DATE;
        else if (strcmp (base->commit_id, commit2->commit_id) == 0)
            result = VC_FAST_FORWARD;
        seaf_commit_unref (base);
    }

    seaf_commit_unref (commit1);
    seaf_commit_unref (commit2);

    return result;
}
/**
 * Diff a specific file with parent(s).
 * If @commit is a merge, both parents will be compared.
 * @commit must have this file and its id is given in @file_id.
 *
 * Returns 0 if there is no difference; 1 otherwise.
 * If returns 0, @parent will point to the next commit to traverse
 * (a caller-supplied buffer of at least 41 bytes: 40 hex chars + NUL).
 * If I/O error occurs, @error will be set.
 */
static int
diff_parents_with_path (SeafCommit *commit,
                        const char *repo_id,
                        const char *store_id,
                        int version,
                        const char *path,
                        const char *file_id,
                        char *parent,
                        GError **error)
{
    SeafCommit *p1 = NULL, *p2 = NULL;
    char *file_id_p1 = NULL, *file_id_p2 = NULL;
    int ret = 0;

    p1 = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                         commit->repo_id,
                                         commit->version,
                                         commit->parent_id);
    if (!p1) {
        g_warning ("Failed to find commit %s.\n", commit->parent_id);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, " ");
        return 0;
    }

    /* Parent tree is empty: the file must have been added by @commit. */
    if (strcmp (p1->root_id, EMPTY_SHA1) == 0) {
        seaf_commit_unref (p1);
        return 1;
    }

    if (commit->second_parent_id) {
        p2 = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                             commit->repo_id,
                                             commit->version,
                                             commit->second_parent_id);
        if (!p2) {
            g_warning ("Failed to find commit %s.\n", commit->second_parent_id);
            seaf_commit_unref (p1);
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, " ");
            return 0;
        }
    }

    if (!p2) {
        /* Single-parent commit: changed iff the file id differs from
         * (or is absent in) p1. */
        file_id_p1 = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,
                                                     store_id,
                                                     version,
                                                     p1->root_id, path,
                                                     NULL,
                                                     error);
        if (*error)
            goto out;
        if (!file_id_p1 || strcmp (file_id, file_id_p1) != 0)
            ret = 1;
        else
            memcpy (parent, p1->commit_id, 41);
    } else {
        /* Merge commit: unchanged iff the file matches either parent;
         * traversal continues at whichever parent matches. */
        file_id_p1 = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,
                                                     store_id,
                                                     version,
                                                     p1->root_id, path,
                                                     NULL, error);
        if (*error)
            goto out;
        file_id_p2 = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,
                                                     store_id,
                                                     version,
                                                     p2->root_id, path,
                                                     NULL, error);
        if (*error)
            goto out;

        if (file_id_p1 && file_id_p2) {
            if (strcmp(file_id, file_id_p1) != 0 &&
                strcmp(file_id, file_id_p2) != 0)
                ret = 1;
            else if (strcmp(file_id, file_id_p1) == 0)
                memcpy (parent, p1->commit_id, 41);
            else
                memcpy (parent, p2->commit_id, 41);
        } else if (file_id_p1 && !file_id_p2) {
            if (strcmp(file_id, file_id_p1) != 0)
                ret = 1;
            else
                memcpy (parent, p1->commit_id, 41);
        } else if (!file_id_p1 && file_id_p2) {
            if (strcmp(file_id, file_id_p2) != 0)
                ret = 1;
            else
                memcpy (parent, p2->commit_id, 41);
        } else {
            /* File absent in both parents: it was added by the merge. */
            ret = 1;
        }
    }

out:
    g_free (file_id_p1);
    g_free (file_id_p2);
    if (p1)
        seaf_commit_unref (p1);
    if (p2)
        seaf_commit_unref (p2);
    return ret;
}
/* Walk the history of a v0 repo from @head to find the commit that
 * last changed @path; report its creator and ctime.
 * Returns 0 on success (with *modifier newly allocated, caller frees),
 * -1 on error. */
static int
get_file_modifier_mtime_v0 (const char *repo_id, const char *store_id, int version,
                            const char *head, const char *path,
                            char **modifier, gint64 *mtime)
{
    char commit_id[41];
    SeafCommit *commit = NULL;
    char *file_id = NULL;
    int changed;
    int ret = 0;
    GError *error = NULL;

    *modifier = NULL;
    *mtime = 0;
    memcpy (commit_id, head, 41);

    while (1) {
        commit = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                                 repo_id, version,
                                                 commit_id);
        if (!commit) {
            ret = -1;
            break;
        }

        /* We hit the initial commit. */
        if (!commit->parent_id)
            break;

        file_id = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,
                                                  store_id, version,
                                                  commit->root_id,
                                                  path,
                                                  NULL,
                                                  &error);
        if (error) {
            g_clear_error (&error);
            ret = -1;
            break;
        }
        /* We expect commit to have this file. */
        if (!file_id) {
            ret = -1;
            break;
        }

        changed = diff_parents_with_path (commit,
                                          repo_id, store_id, version,
                                          path, file_id,
                                          commit_id, &error);
        if (error) {
            g_clear_error (&error);
            ret = -1;
            break;
        }

        if (changed) {
            *modifier = g_strdup (commit->creator_name);
            *mtime = commit->ctime;
            break;
        } else {
            /* This commit didn't change the file; commit_id now holds
             * the parent commit to continue from.  Reset the freed
             * pointers to NULL: the original left file_id dangling
             * here, causing a double free after the loop whenever the
             * next get_commit lookup failed. */
            g_free (file_id);
            file_id = NULL;
            seaf_commit_unref (commit);
            commit = NULL;
        }
    }

    g_free (file_id);
    if (commit)
        seaf_commit_unref (commit);
    return ret;
}
/* v1+ repos store modifier/mtime in the dirent: look up the entry for
 * @path under @head's root and copy its metadata out.
 * Returns 0 on success (with *modifier newly allocated, caller frees),
 * -1 on error.  When the entry is not found, returns 0 with
 * *modifier == NULL and *mtime == 0. */
static int
get_file_modifier_mtime_v1 (const char *repo_id, const char *store_id, int version,
                            const char *head, const char *path,
                            char **modifier, gint64 *mtime)
{
    SeafCommit *commit = NULL;
    SeafDir *dir = NULL;
    SeafDirent *dent = NULL;
    int ret = 0;

    /* Initialize outputs up front, matching the v0 variant: the
     * original left *modifier/*mtime untouched when the entry was not
     * found yet still returned 0, handing callers uninitialized
     * values. */
    *modifier = NULL;
    *mtime = 0;

    commit = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                             repo_id, version,
                                             head);
    if (!commit) {
        seaf_warning ("Failed to get commit %s.\n", head);
        return -1;
    }

    /* Split @path into containing directory and file name; "" means
     * the repo root. */
    char *parent = g_path_get_dirname (path);
    if (strcmp(parent, ".") == 0) {
        g_free (parent);
        parent = g_strdup("");
    }

    char *filename = g_path_get_basename (path);

    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,
                                               store_id, version,
                                               commit->root_id,
                                               parent, NULL);
    if (!dir) {
        seaf_warning ("dir %s doesn't exist in repo %s.\n", parent, repo_id);
        ret = -1;
        goto out;
    }

    GList *p;
    for (p = dir->entries; p; p = p->next) {
        SeafDirent *d = p->data;
        if (strcmp (d->name, filename) == 0) {
            dent = d;
            break;
        }
    }

    if (!dent) {
        goto out;
    }

    *modifier = g_strdup(dent->modifier);
    *mtime = dent->mtime;

out:
    g_free (parent);
    g_free (filename);
    seaf_commit_unref (commit);
    seaf_dir_free (dir);

    return ret;
}
/**
 * Get the user who last changed a file and the mtime.
 * @head: head commit to start the search.
 * @path: path of the file.
 *
 * Dispatches on the repo version: v1+ repos record modifier/mtime in
 * the dirent; v0 repos require walking the commit history.
 */
int
get_file_modifier_mtime (const char *repo_id,
                         const char *store_id,
                         int version,
                         const char *head,
                         const char *path,
                         char **modifier,
                         gint64 *mtime)
{
    if (version <= 0)
        return get_file_modifier_mtime_v0 (repo_id, store_id, version,
                                           head, path,
                                           modifier, mtime);

    return get_file_modifier_mtime_v1 (repo_id, store_id, version,
                                       head, path,
                                       modifier, mtime);
}
/* Build a conflict-file name from @origin_path by inserting a
 * "(SFConflict [modifier] timestamp)" marker before the extension
 * (or at the end when there is no '.').  @modifier may be NULL.
 * Returns a newly allocated string; caller frees. */
char *
gen_conflict_path (const char *origin_path,
                   const char *modifier,
                   gint64 mtime)
{
    char stamp[64];
    time_t when = (time_t)mtime;
    char *base = g_strdup (origin_path);
    GString *out = g_string_new (NULL);
    char *last_dot;

    strftime (stamp, 64, "%Y-%m-%d-%H-%M-%S", localtime (&when));

    last_dot = strrchr (base, '.');
    if (last_dot) {
        /* Split at the last dot and re-attach the extension. */
        const char *suffix = last_dot + 1;
        *last_dot = '\0';
        if (modifier)
            g_string_printf (out, "%s (SFConflict %s %s).%s",
                             base, modifier, stamp, suffix);
        else
            g_string_printf (out, "%s (SFConflict %s).%s",
                             base, stamp, suffix);
    } else if (modifier) {
        g_string_printf (out, "%s (SFConflict %s %s)",
                         base, modifier, stamp);
    } else {
        g_string_printf (out, "%s (SFConflict %s)",
                         base, stamp);
    }

    g_free (base);
    return g_string_free (out, FALSE);
}
/* Convenience wrapper: look up who last modified @in_repo_path at
 * @head and generate the conflict name for @original_path from it.
 * Returns a newly allocated path (caller frees), or NULL on error. */
char *
gen_conflict_path_wrapper (const char *repo_id, int version,
                           const char *head, const char *in_repo_path,
                           const char *original_path)
{
    char *modifier = NULL;
    gint64 mtime;
    char *conflict_path;

    /* XXX: this function is only used in client, so store_id is always
     * the same as repo_id. This can be changed if it's also called in
     * server.
     */
    if (get_file_modifier_mtime (repo_id, repo_id, version, head, in_repo_path,
                                 &modifier, &mtime) < 0)
        return NULL;

    conflict_path = gen_conflict_path (original_path, modifier, mtime);
    /* gen_conflict_path() only reads @modifier; the string allocated by
     * get_file_modifier_mtime() was previously leaked here. */
    g_free (modifier);

    return conflict_path;
}
================================================
FILE: common/vc-common.h
================================================
#ifndef VC_COMMON_H
#define VC_COMMON_H

#include "commit-mgr.h"

/* Version-control helpers shared by client and server code. */

/* Merge base (common ancestor commit) of `head` and `remote`. */
SeafCommit *
get_merge_base (SeafCommit *head, SeafCommit *remote);

/*
 * Returns true if src_head is ahead of dst_head.
 */
gboolean
is_fast_forward (const char *repo_id, int version,
                 const char *src_head, const char *dst_head);

typedef enum {
    VC_UP_TO_DATE,
    VC_FAST_FORWARD,
    VC_INDEPENDENT,
} VCCompareResult;

/*
 * Compares commits c1 and c2 as if we were going to merge c1 into c2.
 *
 * Returns:
 * VC_UP_TO_DATE:   if c2 is ahead of c1, or c1 == c2;
 * VC_FAST_FORWARD: if c1 is ahead of c2;
 * VC_INDEPENDENT:  if c1 and c2 have no inheritance relationship.
 * Returns VC_INDEPENDENT if c1 or c2 doesn't exist.
 */
VCCompareResult
vc_compare_commits (const char *repo_id, int version,
                    const char *c1, const char *c2);

/* Build a conflict file name like "name (SFConflict modifier time).ext".
 * Returns a newly allocated string. */
char *
gen_conflict_path (const char *original_path,
                   const char *modifier,
                   gint64 mtime);

/* Look up the last modifier and mtime of `path` in commit `head`.
 * Returns 0 on success, negative on error. */
int
get_file_modifier_mtime (const char *repo_id, const char *store_id, int version,
                         const char *head, const char *path,
                         char **modifier, gint64 *mtime);

/* Wrapper around the above two functions */
char *
gen_conflict_path_wrapper (const char *repo_id, int version,
                           const char *head, const char *in_repo_path,
                           const char *original_path);

#endif
================================================
FILE: configure.ac
================================================
dnl Process this file with autoconf to produce a configure script.
AC_PREREQ(2.61)
AC_INIT([seafile], [6.0.1], [freeplant@gmail.com])
AC_CONFIG_HEADER([config.h])
AC_CONFIG_MACRO_DIR([m4])

AM_INIT_AUTOMAKE([1.9 foreign])
#AC_MINGW32
AC_CANONICAL_BUILD

dnl enable the build of share library by default
AC_ENABLE_SHARED
AC_SUBST(LIBTOOL_DEPS)

# Checks for programs.
AC_PROG_CC
#AM_C_PROTOTYPES
AC_C_CONST
AC_PROG_MAKE_SET
# AC_PROG_RANLIB
LT_INIT

# Checks for headers.
#AC_CHECK_HEADERS([arpa/inet.h fcntl.h inttypes.h libintl.h limits.h locale.h netdb.h netinet/in.h stdint.h stdlib.h string.h strings.h sys/ioctl.h sys/socket.h sys/time.h termios.h unistd.h utime.h utmp.h])

# Checks for typedefs, structures, and compiler characteristics.
AC_SYS_LARGEFILE

# Checks for library functions.
#AC_CHECK_FUNCS([alarm dup2 ftruncate getcwd gethostbyname gettimeofday memmove memset mkdir rmdir select setlocale socket strcasecmp strchr strdup strrchr strstr strtol uname utime strtok_r sendfile])

# check platform
dnl Exactly one of bwin32/bmac/blinux ends up set to "true" below.
AC_MSG_CHECKING(for WIN32)
if test "$build_os" = "mingw32" -o "$build_os" = "mingw64"; then
    bwin32=true
    AC_MSG_RESULT(compile in mingw)
else
    AC_MSG_RESULT(no)
fi

AC_MSG_CHECKING(for Mac)
if test "$(uname)" = "Darwin"; then
    bmac=true
    AC_MSG_RESULT(compile in mac)
else
    AC_MSG_RESULT(no)
fi

AC_MSG_CHECKING(for Linux)
if test "$bmac" != "true" -a "$bwin32" != "true"; then
    blinux=true
    AC_MSG_RESULT(compile in linux)
else
    AC_MSG_RESULT(no)
fi

# test which sub-component to compile
dnl Command-line tools are built on Linux only.
if test "$bwin32" = true; then
    compile_tools=no
fi
if test "$bmac" = true; then
    compile_tools=no
fi
if test "$blinux" = true; then
    compile_tools=yes
fi

dnl FUSE has no Windows support, so the option is only offered elsewhere.
if test "$bwin32" != true; then
    AC_ARG_ENABLE(fuse, AC_HELP_STRING([--enable-fuse], [enable fuse virtual file system]),
        [compile_fuse=$enableval],[compile_fuse="yes"])
fi

AC_ARG_ENABLE(python,
    AC_HELP_STRING([--enable-python],[build seafile python binding]),
    [compile_python=$enableval],
    [compile_python=yes])

dnl "default_mysql_config" is a sentinel meaning "use pkg-config" later on.
AC_ARG_WITH(mysql,
    AC_HELP_STRING([--with-mysql],[path to mysql_config]),
    [MYSQL_CONFIG=$with_mysql],
    [MYSQL_CONFIG="default_mysql_config"])

AC_ARG_ENABLE(httpserver, AC_HELP_STRING([--enable-httpserver], [enable httpserver]),
    [compile_httpserver=$enableval],[compile_httpserver="yes"])

AM_CONDITIONAL([COMPILE_TOOLS], [test "${compile_tools}" = "yes"])
AM_CONDITIONAL([COMPILE_PYTHON], [test "${compile_python}" = "yes"])
AM_CONDITIONAL([COMPILE_FUSE], [test "${compile_fuse}" = "yes"])
AM_CONDITIONAL([WIN32], [test "$bwin32" = "true"])
AM_CONDITIONAL([MACOS], [test "$bmac" = "true"])
AM_CONDITIONAL([LINUX], [test "$blinux" = "true"])

# check libraries
dnl On macOS uuid_generate lives in libc; elsewhere it is in libuuid.
if test "$bwin32" != true; then
    if test "$bmac" = true; then
        AC_CHECK_LIB(c, uuid_generate, [echo "found library uuid"],
            AC_MSG_ERROR([*** Unable to find uuid_generate in libc]), )
    else
        AC_CHECK_LIB(uuid, uuid_generate, [echo "found library uuid"],
            AC_MSG_ERROR([*** Unable to find uuid library]), )
    fi
fi

AC_CHECK_LIB(pthread, pthread_create, [echo "found library pthread"], AC_MSG_ERROR([*** Unable to find pthread library]), )
AC_CHECK_LIB(sqlite3, sqlite3_open,[echo "found library sqlite3"] , AC_MSG_ERROR([*** Unable to find sqlite3 library]), )
AC_CHECK_LIB(crypto, SHA1_Init, [echo "found library crypto"], AC_MSG_ERROR([*** Unable to find openssl crypto library]), )

dnl Do we need to use AX_LIB_SQLITE3 to check sqlite?
dnl AX_LIB_SQLITE3

CONSOLE=
dnl On Windows, --disable-console links the binary as a GUI subsystem app
dnl so no console window is opened.
if test "$bwin32" = "true"; then
    AC_ARG_ENABLE(console, AC_HELP_STRING([--enable-console], [enable console]),
        [console=$enableval],[console="yes"])
    if test x${console} != xyes ; then
        CONSOLE="-Wl,--subsystem,windows -Wl,--entry,_mainCRTStartup"
    fi
fi
AC_SUBST(CONSOLE)
dnl Per-platform linker flag selection; an empty value means the library
dnl is not needed on that platform.
if test "$bwin32" = true; then
    LIB_WS32=-lws2_32
    LIB_GDI32=-lgdi32
    LIB_RT=
    LIB_INTL=-lintl
    LIBS=
    LIB_RESOLV=
    LIB_UUID=-lRpcrt4
    LIB_IPHLPAPI=-liphlpapi
    LIB_SHELL32=-lshell32
    LIB_PSAPI=-lpsapi
    LIB_MAC=
    MSVC_CFLAGS="-D__MSVCRT__ -D__MSVCRT_VERSION__=0x0601"
    LIB_CRYPT32=-lcrypt32
    LIB_ICONV=-liconv
elif test "$bmac" = true ; then
    LIB_WS32=
    LIB_GDI32=
    LIB_RT=
    LIB_INTL=
    LIB_RESOLV=-lresolv
    LIB_UUID=
    LIB_IPHLPAPI=
    LIB_SHELL32=
    LIB_PSAPI=
    MSVC_CFLAGS=
    LIB_MAC="-framework CoreServices"
    LIB_CRYPT32=
    LIB_ICONV=-liconv
else
    LIB_WS32=
    LIB_GDI32=
    LIB_RT=
    LIB_INTL=
    LIB_RESOLV=-lresolv
    LIB_UUID=-luuid
    LIB_IPHLPAPI=
    LIB_SHELL32=
    LIB_PSAPI=
    LIB_MAC=
    MSVC_CFLAGS=
    LIB_CRYPT32=
fi

AC_SUBST(LIB_WS32)
AC_SUBST(LIB_GDI32)
AC_SUBST(LIB_RT)
AC_SUBST(LIB_INTL)
AC_SUBST(LIB_RESOLV)
AC_SUBST(LIB_UUID)
AC_SUBST(LIB_IPHLPAPI)
AC_SUBST(LIB_SHELL32)
AC_SUBST(LIB_PSAPI)
AC_SUBST(LIB_MAC)
AC_SUBST(MSVC_CFLAGS)
AC_SUBST(LIB_CRYPT32)
AC_SUBST(LIB_ICONV)
dnl Minimum required versions of external dependencies.
LIBEVENT_REQUIRED=2.0
GLIB_REQUIRED=2.16.0
SEARPC_REQUIRED=1.0
JANSSON_REQUIRED=2.2.1
ZDB_REQUIRED=2.10
#LIBNAUTILUS_EXTENSION_REQUIRED=2.30.1
CURL_REQUIRED=7.17
FUSE_REQUIRED=2.7.3
ZLIB_REQUIRED=1.2.0
dnl Renamed from the misspelled LIHIBREDIS_REQUIRED.
LIBHIREDIS_REQUIRED=0.15.0

PKG_CHECK_MODULES(SSL, [openssl])
AC_SUBST(SSL_CFLAGS)
AC_SUBST(SSL_LIBS)

PKG_CHECK_MODULES(GLIB2, [glib-2.0 >= $GLIB_REQUIRED])
AC_SUBST(GLIB2_CFLAGS)
AC_SUBST(GLIB2_LIBS)

PKG_CHECK_MODULES(GOBJECT, [gobject-2.0 >= $GLIB_REQUIRED])
AC_SUBST(GOBJECT_CFLAGS)
AC_SUBST(GOBJECT_LIBS)

PKG_CHECK_MODULES(SEARPC, [libsearpc >= $SEARPC_REQUIRED])
AC_SUBST(SEARPC_CFLAGS)
AC_SUBST(SEARPC_LIBS)

PKG_CHECK_MODULES(JANSSON, [jansson >= $JANSSON_REQUIRED])
AC_SUBST(JANSSON_CFLAGS)
AC_SUBST(JANSSON_LIBS)

PKG_CHECK_MODULES(LIBEVENT, [libevent >= $LIBEVENT_REQUIRED])
AC_SUBST(LIBEVENT_CFLAGS)
AC_SUBST(LIBEVENT_LIBS)

PKG_CHECK_MODULES(ZLIB, [zlib >= $ZLIB_REQUIRED])
AC_SUBST(ZLIB_CFLAGS)
AC_SUBST(ZLIB_LIBS)

dnl MySQL: probed via pkg-config by default (optional); when a
dnl mysql_config path was given via --with-mysql, use it and require MySQL.
if test "x${MYSQL_CONFIG}" = "xdefault_mysql_config"; then
    PKG_CHECK_MODULES(MYSQL, [mysqlclient], [have_mysql="yes"], [have_mysql="no"])
    if test "x${have_mysql}" = "xyes"; then
        AC_SUBST(MYSQL_CFLAGS)
        AC_SUBST(MYSQL_LIBS)
        AC_DEFINE([HAVE_MYSQL], 1, [Define to 1 if MySQL support is enabled])
    fi
else
    AC_MSG_CHECKING([for MySQL])
    MYSQL_CFLAGS=`${MYSQL_CONFIG} --include`
    MYSQL_LIBS=`${MYSQL_CONFIG} --libs`
    AC_MSG_RESULT([${MYSQL_CFLAGS}])
    AC_SUBST(MYSQL_CFLAGS)
    AC_SUBST(MYSQL_LIBS)
    AC_DEFINE([HAVE_MYSQL], 1, [Define to 1 if MySQL support is enabled])
fi

if test "${compile_httpserver}" = "yes"; then
    AC_DEFINE([HAVE_EVHTP], [1], [Define to 1 if httpserver is enabled.])
    AC_SUBST(EVHTP_LIBS, "-levhtp")
fi

PKG_CHECK_MODULES(LIBHIREDIS, [hiredis >= $LIBHIREDIS_REQUIRED])
AC_SUBST(LIBHIREDIS_CFLAGS)
AC_SUBST(LIBHIREDIS_LIBS)

PKG_CHECK_MODULES(CURL, [libcurl >= $CURL_REQUIRED])
AC_SUBST(CURL_CFLAGS)
AC_SUBST(CURL_LIBS)

PKG_CHECK_MODULES(JWT, [libjwt])
AC_SUBST(JWT_CFLAGS)
AC_SUBST(JWT_LIBS)

PKG_CHECK_MODULES(ARGON2, [libargon2])
AC_SUBST(ARGON2_CFLAGS)
AC_SUBST(ARGON2_LIBS)
dnl Optional python binding: locate the interpreter; on Windows, place
dnl site-packages under $PYTHON_DIR when that variable is set.
if test x${compile_python} = xyes; then
    AM_PATH_PYTHON([2.6])
    if test "$bwin32" = true; then
        if test x$PYTHON_DIR != x; then
            # set pyexecdir to somewhere like /c/Python26/Lib/site-packages
            pyexecdir=${PYTHON_DIR}/Lib/site-packages
            pythondir=${pyexecdir}
            pkgpyexecdir=${pyexecdir}/${PACKAGE}
            pkgpythondir=${pythondir}/${PACKAGE}
        fi
    fi
fi

if test "${compile_fuse}" = "yes"; then
    PKG_CHECK_MODULES(FUSE, [fuse >= $FUSE_REQUIRED])
    AC_SUBST(FUSE_CFLAGS)
    AC_SUBST(FUSE_LIBS)
fi

dnl check libarchive
LIBARCHIVE_REQUIRED=2.8.5
PKG_CHECK_MODULES(LIBARCHIVE, [libarchive >= $LIBARCHIVE_REQUIRED])
AC_SUBST(LIBARCHIVE_CFLAGS)
AC_SUBST(LIBARCHIVE_LIBS)

dnl Run sub-configures quietly.
ac_configure_args="$ac_configure_args -q"

AC_CONFIG_FILES(
    Makefile
    include/Makefile
    fuse/Makefile
    lib/Makefile
    lib/libseafile.pc
    common/Makefile
    common/cdc/Makefile
    server/Makefile
    server/gc/Makefile
    python/Makefile
    python/seafile/Makefile
    python/seaserv/Makefile
    controller/Makefile
    tools/Makefile
    doc/Makefile
    scripts/Makefile
)

AC_OUTPUT
================================================
FILE: controller/Makefile.am
================================================
# Build rules for the seafile-controller daemon, which supervises the
# other server processes.
bin_PROGRAMS = seafile-controller

AM_CFLAGS = \
	-DSEAFILE_SERVER \
	-I$(top_srcdir)/include \
	-I$(top_srcdir)/lib \
	-I$(top_builddir)/lib \
	-I$(top_srcdir)/common \
	@SEARPC_CFLAGS@ \
	@GLIB2_CFLAGS@ \
	-Wall

noinst_HEADERS = seafile-controller.h ../common/log.h

# log.c is compiled directly into the controller rather than linked from
# a library.
seafile_controller_SOURCES = seafile-controller.c ../common/log.c

seafile_controller_LDADD = $(top_builddir)/lib/libseafile_common.la \
	@GLIB2_LIBS@ @GOBJECT_LIBS@ @SSL_LIBS@ @LIB_RT@ @LIB_UUID@ @LIBEVENT_LIBS@ \
	@SEARPC_LIBS@ @JANSSON_LIBS@ @ZLIB_LIBS@
================================================
FILE: controller/seafile-controller.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#include "common.h"
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "utils.h"
#include "log.h"
#include "seafile-controller.h"
#define CHECK_PROCESS_INTERVAL 10 /* every 10 seconds */
#if defined(__sun)
#define PROC_SELF_PATH "/proc/self/path/a.out"
#else
#define PROC_SELF_PATH "/proc/self/exe"
#endif
SeafileController *ctl;
static char *controller_pidfile = NULL;
char *bin_dir = NULL;
char *installpath = NULL;
char *topdir = NULL;
gboolean enabled_go_fileserver = FALSE;
char *seafile_ld_library_path = NULL;
static const char *short_opts = "hvftc:d:l:g:G:P:F:";
static const struct option long_opts[] = {
{ "help", no_argument, NULL, 'h', },
{ "version", no_argument, NULL, 'v', },
{ "foreground", no_argument, NULL, 'f', },
{ "test", no_argument, NULL, 't', },
{ "config-dir", required_argument, NULL, 'c', },
{ "seafile-dir", required_argument, NULL, 'd', },
{ "central-config-dir", required_argument, NULL, 'F' },
{ "logdir", required_argument, NULL, 'l', },
{ "ccnet-debug-level", required_argument, NULL, 'g' },
{ "seafile-debug-level", required_argument, NULL, 'G' },
{ "pidfile", required_argument, NULL, 'P' },
{ NULL, 0, NULL, 0, },
};
static void controller_exit (int code) __attribute__((noreturn));
static int read_seafdav_config();
/* Terminate the controller, logging abnormal (non-zero) exit codes. */
static void
controller_exit (int code)
{
    if (code)
        seaf_warning ("seaf-controller exited with code %d\n", code);

    exit (code);
}
//
// Utility functions Start
//
/* returns the pid of the newly created process */
/* Fork and exec the command given in argv[].  For python children the
 * child's stderr is redirected into a pipe and echoed into our log
 * shortly after startup.  Returns the child's pid, or -1 on fork error. */
static int
spawn_process (char *argv[], bool is_python_process)
{
    char **ptr = argv;
    GString *buf = g_string_new(argv[0]);
    while (*(++ptr)) {
        g_string_append_printf (buf, " %s", *ptr);
    }
    seaf_message ("spawn_process: %s\n", buf->str);
    g_string_free (buf, TRUE);

    int pipefd[2] = {0, 0};
    if (is_python_process) {
        if (pipe(pipefd) < 0) {
            seaf_warning("Failed to create pipe.\n");
        }
        /* Non-blocking so the parent's read loop below cannot hang. */
        fcntl(pipefd[0], F_SETFL, O_NONBLOCK);
    }

    pid_t pid = fork();
    if (pid == 0) {
        /* child process: route stderr into the pipe for the parent. */
        if (is_python_process && pipefd[0] > 0 && pipefd[1] > 0) {
            close(pipefd[0]);
            dup2(pipefd[1], 2);
        }
        execvp (argv[0], argv);
        /* Only reached when execvp failed. */
        seaf_warning ("failed to execvp %s\n", argv[0]);
        if (pipefd[1] > 0) {
            close(pipefd[1]);
        }
        exit(-1);
    } else {
        /* controller */
        if (pid == -1)
            seaf_warning ("error when fork %s: %s\n", argv[0], strerror(errno));
        else
            seaf_message ("spawned %s, pid %d\n", argv[0], (int)pid);

        if (is_python_process) {
            char child_stderr[1024];
            if (pipefd[0] > 0 && pipefd[1] > 0) {
                close(pipefd[1]);
                sleep(1);
                ssize_t n;
                /* BUGFIX: read() does not NUL-terminate.  The previous
                 * code read sizeof(child_stderr) bytes and then printed
                 * the buffer with "%s", which is undefined behavior when
                 * the pipe fills the whole buffer.  Read one byte less
                 * and terminate explicitly. */
                while ((n = read(pipefd[0], child_stderr,
                                 sizeof(child_stderr) - 1)) > 0) {
                    child_stderr[n] = '\0';
                    seaf_warning("%s", child_stderr);
                }
                close(pipefd[0]);
            }
        }
        return (int)pid;
    }
}
#define PID_ERROR_ENOENT 0
#define PID_ERROR_OTHER -1
/**
 * Read a process id from a pidfile.
 *
 * @return
 * - pid if successfully opened and read the file
 * - PID_ERROR_ENOENT if file not exists,
 * - PID_ERROR_OTHER if other errors
 */
static int
read_pid_from_pidfile (const char *pidfile)
{
    FILE *pf = g_fopen (pidfile, "r");
    if (!pf) {
        if (errno == ENOENT) {
            return PID_ERROR_ENOENT;
        } else {
            return PID_ERROR_OTHER;
        }
    }

    int pid = PID_ERROR_OTHER;
    /* fscanf returns the number of items converted.  The previous
     * `< 0` check missed the matching-failure case (return value 0,
     * e.g. a pidfile containing garbage), which silently skipped the
     * diagnostic.  `!= 1` catches both EOF and matching failure. */
    if (fscanf (pf, "%d", &pid) != 1) {
        seaf_warning ("bad pidfile format: %s\n", pidfile);
        fclose(pf);
        return PID_ERROR_OTHER;
    }

    fclose(pf);
    return pid;
}
/* Send SIGKILL to the process recorded in pidfile slot `which`, and
 * remove the pidfile once the signal was delivered successfully. */
static void
kill_by_force (int which)
{
    if (which < 0 || which >= N_PID)
        return;

    const char *pidfile = ctl->pidfile[which];
    int pid = read_pid_from_pidfile (pidfile);
    if (pid <= 0)
        return;

    /* if SIGKILL send success, then remove related pid file */
    if (kill ((pid_t)pid, SIGKILL) == 0)
        g_unlink (pidfile);
}
//
// Utility functions End
//
/* Launch the seaf-server daemon with paths from the controller config.
 * Returns 0 on success, -1 on failure. */
static int
start_seaf_server ()
{
    static char *logfile = NULL;

    if (!ctl->config_dir || !ctl->seafile_dir)
        return -1;

    seaf_message ("starting seaf-server ...\n");

    if (!logfile)
        logfile = g_build_filename (ctl->logdir, "seafile.log", NULL);

    char *child_argv[] = {
        "seaf-server",
        "-F", ctl->central_config_dir,
        "-c", ctl->config_dir,
        "-d", ctl->seafile_dir,
        "-l", logfile,
        "-P", ctl->pidfile[PID_SERVER],
        "-p", ctl->rpc_pipe_path,
        NULL
    };

    if (spawn_process (child_argv, false) <= 0) {
        seaf_warning ("Failed to spawn seaf-server\n");
        return -1;
    }

    return 0;
}
/* Launch the Go fileserver daemon.  Returns 0 on success, -1 on failure. */
static int
start_go_fileserver()
{
    if (!ctl->central_config_dir || !ctl->seafile_dir)
        return -1;

    static char *logfile = NULL;
    if (logfile == NULL) {
        logfile = g_build_filename (ctl->logdir, "fileserver.log", NULL);
    }

    char *argv[] = {
        "fileserver",
        "-F", ctl->central_config_dir,
        "-d", ctl->seafile_dir,
        "-l", logfile,
        "-p", ctl->rpc_pipe_path,
        "-P", ctl->pidfile[PID_FILESERVER],
        NULL};

    /* Terminate the message with '\n' like every other log line in this
     * file (it previously lacked the newline). */
    seaf_message ("starting go-fileserver ...\n");
    int pid = spawn_process(argv, false);
    if (pid <= 0) {
        seaf_warning("Failed to spawn fileserver\n");
        return -1;
    }
    return 0;
}
/* Resolve the python interpreter to use, once, and cache the result.
 * Preference order: "python3" on PATH, then $PYTHON, then "python". */
static const char *
get_python_executable() {
    static const char *python = NULL;

    if (python)
        return python;

    static const char *candidates[] = {
        "python3"
    };

    int i;
    for (i = 0; i < G_N_ELEMENTS(candidates); i++) {
        char *found = g_find_program_in_path (candidates[i]);
        if (found) {
            python = found;
            break;
        }
    }

    if (!python)
        python = g_getenv ("PYTHON");
    if (!python)
        python = "python";

    return python;
}
static void
init_seafile_path ()
{
GError *error = NULL;
char *binary = g_file_read_link (PROC_SELF_PATH, &error);
char *tmp = NULL;
if (error != NULL) {
seaf_warning ("failed to readlink: %s\n", error->message);
return;
}
bin_dir = g_path_get_dirname (binary);
tmp = g_path_get_dirname (bin_dir);
installpath = g_path_get_dirname (tmp);
topdir = g_path_get_dirname (installpath);
g_free (binary);
g_free (tmp);
}
/* Append seafile/seahub library locations to PYTHONPATH so spawned
 * python processes (seafdav) can import them.  Runs at most once. */
static void
setup_python_path()
{
    static GList *path_list = NULL;
    if (path_list != NULL) {
        /* Only setup once */
        return;
    }

    /* Allow seafdav to access seahub_settings.py */
    path_list = g_list_prepend (path_list, g_build_filename (topdir, "conf", NULL));

    path_list = g_list_prepend (path_list,
        g_build_filename (installpath, "seahub", NULL));

    path_list = g_list_prepend (path_list,
        g_build_filename (installpath, "seahub/thirdpart", NULL));

    path_list = g_list_prepend (path_list,
        g_build_filename (installpath, "seahub/seahub-extra", NULL));

    path_list = g_list_prepend (path_list,
        g_build_filename (installpath, "seahub/seahub-extra/thirdparts", NULL));

    path_list = g_list_prepend (path_list,
        g_build_filename (installpath, "seafile/lib/python3/site-packages", NULL));

    path_list = g_list_prepend (path_list,
        g_build_filename (installpath, "seafile/lib64/python3/site-packages", NULL));

    path_list = g_list_reverse (path_list);

    GList *ptr;
    GString *new_pypath = g_string_new (g_getenv("PYTHONPATH"));
    for (ptr = path_list; ptr != NULL; ptr = ptr->next) {
        const char *path = (char *)ptr->data;
        g_string_append_c (new_pypath, ':');
        g_string_append (new_pypath, path);
    }

    /* g_setenv makes its own copy of the value, so free our buffer to
     * avoid leaking it (previously the g_string_free(FALSE) result was
     * passed directly and never released). */
    char *pypath = g_string_free (new_pypath, FALSE);
    g_setenv ("PYTHONPATH", pypath, TRUE);
    g_free (pypath);

    /* seaf_message ("PYTHONPATH is:\n\n%s\n", g_getenv ("PYTHONPATH")); */
}
/* Export the environment variables that child processes (seaf-server,
 * seafdav, seahub) read, then extend PYTHONPATH. */
static void
setup_env ()
{
    g_setenv ("CCNET_CONF_DIR", ctl->config_dir, TRUE);
    g_setenv ("SEAFILE_CONF_DIR", ctl->seafile_dir, TRUE);
    g_setenv ("SEAFILE_CENTRAL_CONF_DIR", ctl->central_config_dir, TRUE);
    g_setenv ("SEAFILE_RPC_PIPE_PATH", ctl->rpc_pipe_path, TRUE);

    char *seahub_dir = g_build_filename (installpath, "seahub", NULL);
    char *seafdav_conf = g_build_filename (ctl->central_config_dir, "seafdav.conf", NULL);

    g_setenv ("SEAHUB_DIR", seahub_dir, TRUE);
    g_setenv ("SEAFDAV_CONF", seafdav_conf, TRUE);

    /* g_setenv copies the values, so release the buffers (they were
     * previously leaked). */
    g_free (seahub_dir);
    g_free (seafdav_conf);

    setup_python_path();
}
/* Launch seafdav (WsgiDAV under gunicorn).  The previous implementation
 * duplicated the whole argv array for debug vs. normal mode, differing
 * only by a trailing "-v"; build a single argv and fill the optional
 * slot in debug mode instead.  Returns 0 on success, -1 on failure. */
static int
start_seafdav() {
    static char *seafdav_log_file = NULL;
    if (seafdav_log_file == NULL)
        seafdav_log_file = g_build_filename (ctl->logdir,
                                             "seafdav.log",
                                             NULL);

    SeafDavConfig conf = ctl->seafdav_config;
    char port[16];
    snprintf (port, sizeof(port), "%d", conf.port);

    char *argv[] = {
        (char *)get_python_executable(),
        "-m", "wsgidav.server.server_cli",
        "--server", "gunicorn",
        "--root", "/",
        "--log-file", seafdav_log_file,
        "--pid", ctl->pidfile[PID_SEAFDAV],
        "--port", port,
        "--host", conf.host,
        NULL,               /* optional "-v" slot, filled in debug mode */
        NULL
    };

    if (conf.debug_mode)
        argv[G_N_ELEMENTS(argv) - 2] = "-v";

    int pid = spawn_process (argv, true);
    if (pid <= 0) {
        seaf_warning ("Failed to spawn seafdav\n");
        return -1;
    }
    return 0;
}
/* Enter the GLib main loop; does not return during normal operation. */
static void
run_controller_loop ()
{
    GMainLoop *loop = g_main_loop_new (NULL, FALSE);

    g_main_loop_run (loop);
}
/* Decide whether the daemon tracked by pidfile slot `which` must be
 * relaunched: yes when the pidfile is missing, or when the recorded pid
 * no longer has a /proc entry.  Unreadable pidfiles are left alone. */
static gboolean
need_restart (int which)
{
    if (which < 0 || which >= N_PID)
        return FALSE;

    int pid = read_pid_from_pidfile (ctl->pidfile[which]);

    if (pid == PID_ERROR_ENOENT) {
        seaf_warning ("pid file %s does not exist\n", ctl->pidfile[which]);
        return TRUE;
    }

    if (pid == PID_ERROR_OTHER) {
        seaf_warning ("failed to read pidfile %s: %s\n",
                      ctl->pidfile[which], strerror(errno));
        return FALSE;
    }

    char proc_path[256];
    snprintf (proc_path, sizeof(proc_path), "/proc/%d", pid);

    if (g_file_test (proc_path, G_FILE_TEST_IS_DIR))
        return FALSE;

    seaf_warning ("path /proc/%d doesn't exist, restart progress [%d]\n",
                  pid, which);
    return TRUE;
}
/* Read seafile.conf and decide whether to run the Go fileserver instead
 * of the C one.  Requires [fileserver] use_go_fileserver = true AND a
 * mysql database (the Go fileserver does not support sqlite). */
static gboolean
should_start_go_fileserver()
{
    char *seafile_conf = g_build_filename (ctl->central_config_dir, "seafile.conf", NULL);
    GKeyFile *key_file = g_key_file_new ();
    /* Idiom fix: initialize a gboolean with FALSE, not 0. */
    gboolean ret = FALSE;

    if (!g_key_file_load_from_file (key_file, seafile_conf,
                                    G_KEY_FILE_KEEP_COMMENTS, NULL)) {
        seaf_warning("Failed to load seafile.conf.\n");
        goto out;
    }

    GError *err = NULL;
    ret = g_key_file_get_boolean(key_file, "fileserver", "use_go_fileserver", &err);
    if (err) {
        seaf_warning("Config [fileserver, use_go_fileserver] not set, default is FALSE.\n");
        ret = FALSE;
        g_clear_error(&err);
    }

    if (ret) {
        char *type = g_key_file_get_string (key_file, "database", "type", NULL);
        if (!type || g_strcmp0 (type, "mysql") != 0) {
            /* '\n' added for consistency with other log messages. */
            seaf_message ("Use C fileserver because go fileserver does not support sqlite.\n");
            ret = FALSE;
        }
        g_free (type);
    }

out:
    g_key_file_free (key_file);
    g_free (seafile_conf);
    return ret;
}
/* Periodic g_timeout callback: relaunch any managed daemon whose
 * process has disappeared.  Always returns TRUE so the timer repeats. */
static gboolean
check_process (void *data)
{
    if (need_restart(PID_SERVER)) {
        seaf_message ("seaf-server need restart...\n");
        start_seaf_server();
    }

    if (enabled_go_fileserver && need_restart(PID_FILESERVER)) {
        seaf_message("fileserver need restart...\n");
        start_go_fileserver();
    }

    if (ctl->seafdav_config.enabled && need_restart(PID_SEAFDAV)) {
        seaf_message ("seafdav need restart...\n");
        start_seafdav ();
    }

    return TRUE;
}
/* Schedule check_process() to run every CHECK_PROCESS_INTERVAL seconds
 * on the GLib main loop; the source id is kept in ctl. */
static void
start_process_monitor ()
{
    ctl->check_process_timer = g_timeout_add (
        CHECK_PROCESS_INTERVAL * 1000, check_process, NULL);
}
static int seaf_controller_start ();
/* This would also stop seaf-server & other components */
static void
stop_services ()
{
    static const int targets[] = { PID_SERVER, PID_FILESERVER, PID_SEAFDAV };
    int i;

    seaf_message ("shutting down all services ...\n");

    for (i = 0; i < G_N_ELEMENTS(targets); i++)
        kill_by_force (targets[i]);
}
/* Create <topdir>/pids if needed and fill in ctl->pidfile[] paths.
 * Exits the controller if the directory cannot be created. */
static void
init_pidfile_path (SeafileController *ctl)
{
    char *pid_dir = g_build_filename (topdir, "pids", NULL);

    if (!g_file_test(pid_dir, G_FILE_TEST_EXISTS)) {
        if (g_mkdir(pid_dir, 0777) < 0) {
            seaf_warning("failed to create pid dir %s: %s", pid_dir, strerror(errno));
            controller_exit(1);
        }
    }

    ctl->pidfile[PID_SERVER] = g_build_filename (pid_dir, "seaf-server.pid", NULL);
    ctl->pidfile[PID_SEAFDAV] = g_build_filename (pid_dir, "seafdav.pid", NULL);
    ctl->pidfile[PID_FILESERVER] = g_build_filename (pid_dir, "fileserver.pid", NULL);

    /* The pidfile paths above are independent allocations; release the
     * directory string (it was previously leaked). */
    g_free (pid_dir);
}
/* Validate the configured directories, resolve the log directory, and
 * populate the controller state.  The string arguments are stored
 * directly (not copied) — the caller keeps them alive for the process
 * lifetime.  Returns 0 on success, -1 on error. */
static int
seaf_controller_init (SeafileController *ctl,
                      char *central_config_dir,
                      char *config_dir,
                      char *seafile_dir,
                      char *logdir)
{
    init_seafile_path ();
    if (!g_file_test (config_dir, G_FILE_TEST_IS_DIR)) {
        seaf_warning ("invalid config_dir: %s\n", config_dir);
        return -1;
    }

    if (!g_file_test (seafile_dir, G_FILE_TEST_IS_DIR)) {
        seaf_warning ("invalid seafile_dir: %s\n", seafile_dir);
        return -1;
    }

    if (logdir == NULL) {
        /* NOTE: this local `topdir` deliberately shadows the global
         * `topdir` set by init_seafile_path(); here it is derived from
         * config_dir and only used to build the default log path. */
        char *topdir = g_path_get_dirname(config_dir);
        logdir = g_build_filename (topdir, "logs", NULL);
        if (checkdir_with_mkdir(logdir) < 0) {
            seaf_error ("failed to create log folder \"%s\": %s\n",
                        logdir, strerror(errno));
            return -1;
        }
        g_free (topdir);
    }

    ctl->central_config_dir = central_config_dir;
    ctl->config_dir = config_dir;
    ctl->seafile_dir = seafile_dir;
    ctl->rpc_pipe_path = g_build_filename (installpath, "runtime", NULL);
    ctl->logdir = logdir;

    if (read_seafdav_config() < 0) {
        return -1;
    }

    init_pidfile_path (ctl);

    setup_env ();

    return 0;
}
/* Start all configured daemons and begin periodic monitoring.
 * Returns 0 on success, -1 if any daemon fails to start. */
static int
seaf_controller_start ()
{
    if (start_seaf_server() < 0) {
        seaf_warning ("Failed to start seaf server\n");
        return -1;
    }

    if (enabled_go_fileserver && start_go_fileserver() < 0) {
        seaf_warning ("Failed to start fileserver\n");
        return -1;
    }

    start_process_monitor ();
    return 0;
}
/* Write our own pid into controller_pidfile.
 * Returns 0 on success, -1 on error. */
static int
write_controller_pidfile ()
{
    if (!controller_pidfile)
        return -1;

    pid_t pid = getpid();

    FILE *pidfile = g_fopen(controller_pidfile, "w");
    if (!pidfile) {
        seaf_warning ("Failed to fopen() pidfile %s: %s\n",
                      controller_pidfile, strerror(errno));
        return -1;
    }

    char buf[32];
    /* Cast: "%d" expects int; pid_t is not guaranteed to be int. */
    snprintf (buf, sizeof(buf), "%d\n", (int)pid);
    if (fputs(buf, pidfile) < 0) {
        seaf_warning ("Failed to write pidfile %s: %s\n",
                      controller_pidfile, strerror(errno));
        fclose (pidfile);
        return -1;
    }

    fflush (pidfile);

    /* For a write stream, an fclose failure can mean the pid never
     * reached the disk; report it instead of ignoring it. */
    if (fclose (pidfile) != 0) {
        seaf_warning ("Failed to close pidfile %s: %s\n",
                      controller_pidfile, strerror(errno));
        return -1;
    }

    return 0;
}
/* Delete the controller's own pidfile, if one was configured. */
static void
remove_controller_pidfile ()
{
    if (!controller_pidfile)
        return;

    g_unlink (controller_pidfile);
}
/* SIGINT/SIGTERM handler: stop all child services, remove our pidfile,
 * then re-raise the signal with the default disposition so the exit
 * status correctly reflects death-by-signal.
 * NOTE(review): stop_services() and the logging it performs are not
 * async-signal-safe; tolerated here since the process is terminating
 * anyway, but worth confirming. */
static void
sigint_handler (int signo)
{
    stop_services ();
    remove_controller_pidfile();

    signal (signo, SIG_DFL);
    raise (signo);
}
/* SIGCHLD handler: reap exited children without blocking.  Loop until
 * no child is left to reap — multiple children can exit while the
 * signal is pending, and pending signals of the same type coalesce, so
 * a single waitpid() call could leave zombies behind. */
static void
sigchld_handler (int signo)
{
    while (waitpid (-1, NULL, WNOHANG) > 0)
        ;
}
/* SIGUSR1 handler: reopen the log file (used for external log rotation). */
static void
sigusr1_handler (int signo)
{
    seafile_log_reopen();
}
/* Install all controller signal handlers.  SIGPIPE is ignored so writes
 * to dead pipes fail with EPIPE instead of killing the process. */
static void
set_signal_handlers ()
{
    static const struct {
        int signo;
        void (*handler) (int);
    } table[] = {
        { SIGINT,  sigint_handler },
        { SIGTERM, sigint_handler },
        { SIGCHLD, sigchld_handler },
        { SIGUSR1, sigusr1_handler },
        { SIGPIPE, SIG_IGN },
    };
    int i;

    for (i = 0; i < G_N_ELEMENTS(table); i++)
        signal (table[i].signo, table[i].handler);
}
/* Print command-line help to stderr. */
static void
usage ()
{
    static const char help_text[] =
        "Usage: seafile-controller OPTIONS\n"
        "OPTIONS:\n"
        " -b, --bin-dir insert a directory in front of the PATH env\n"
        " -c, --config-dir ccnet config dir\n"
        " -d, --seafile-dir seafile dir\n";

    fputs (help_text, stderr);
}
/* seafile-controller -t is used to test whether config file is valid */
/* Run "seaf-server -t" synchronously, forward its output, and exit
 * the controller with 0 when the config is valid, 1 otherwise.
 * Never returns. */
static void
test_config (const char *central_config_dir,
             const char *ccnet_dir,
             const char *seafile_dir)
{
    char buf[1024];
    GError *error = NULL;
    int retcode = 0;
    char *child_stdout = NULL;
    char *child_stderr = NULL;

    snprintf (buf,
              sizeof(buf),
              "seaf-server -F \"%s\" -c \"%s\" -d \"%s\" -t -f",
              central_config_dir,
              ccnet_dir,
              seafile_dir);

    g_spawn_command_line_sync (buf,
                               &child_stdout,
                               &child_stderr,
                               &retcode,
                               &error);
    if (error != NULL) {
        seaf_error ("failed to run \"seaf-server -t\": %s\n",
                    error->message);
        exit (1);
    }

    if (child_stdout) {
        fputs (child_stdout, stdout);
    }

    /* NOTE(review): child stderr is echoed to stdout, not stderr —
     * presumably intentional so all output lands in one stream. */
    if (child_stderr) {
        fputs (child_stderr, stdout);
    }

    if (retcode != 0) {
        seaf_error ("failed to run \"seaf-server -t\" [%d]\n", retcode);
        exit (1);
    }

    exit(0);
}
/* Parse seafdav.conf (if present) into ctl->seafdav_config.
 * A missing file or a disabled/absent [WEBDAV] section is not an
 * error — seafdav simply stays disabled (ctl is zero-initialized by
 * g_new0 in main).  Returns 0 on success, -1 on load failure or an
 * invalid port. */
static int
read_seafdav_config()
{
    int ret = 0;
    char *seafdav_conf = NULL;
    GKeyFile *key_file = NULL;
    GError *error = NULL;

    seafdav_conf = g_build_filename(ctl->central_config_dir, "seafdav.conf", NULL);
    if (!g_file_test(seafdav_conf, G_FILE_TEST_EXISTS)) {
        /* No config file: leave seafdav disabled. */
        goto out;
    }

    key_file = g_key_file_new ();
    if (!g_key_file_load_from_file (key_file, seafdav_conf,
                                    G_KEY_FILE_KEEP_COMMENTS, NULL)) {
        seaf_warning("Failed to load seafdav.conf\n");
        ret = -1;
        goto out;
    }

    /* enabled */
    ctl->seafdav_config.enabled = g_key_file_get_boolean(key_file, "WEBDAV", "enabled", &error);
    if (error != NULL) {
        /* A missing key is expected; any other parse error is logged. */
        if (error->code != G_KEY_FILE_ERROR_KEY_NOT_FOUND) {
            seaf_message ("Error when reading WEBDAV.enabled, use default value 'false'\n");
        }
        ctl->seafdav_config.enabled = FALSE;
        g_clear_error (&error);
        goto out;
    }

    if (!ctl->seafdav_config.enabled) {
        goto out;
    }

    /* host (default "0.0.0.0" when unset) */
    char *host = seaf_key_file_get_string (key_file, "WEBDAV", "host", &error);
    if (error != NULL) {
        g_clear_error(&error);
        ctl->seafdav_config.host = g_strdup("0.0.0.0");
    } else {
        ctl->seafdav_config.host = host;
    }

    /* port (default 8080 when unset) */
    ctl->seafdav_config.port = g_key_file_get_integer(key_file, "WEBDAV", "port", &error);
    if (error != NULL) {
        if (error->code != G_KEY_FILE_ERROR_KEY_NOT_FOUND) {
            seaf_message ("Error when reading WEBDAV.port, use deafult value 8080\n");
        }
        ctl->seafdav_config.port = 8080;
        g_clear_error (&error);
    }

    /* debug mode (default FALSE when unset) */
    ctl->seafdav_config.debug_mode = g_key_file_get_boolean (key_file, "WEBDAV", "debug", &error);
    if (error != NULL) {
        if (error->code != G_KEY_FILE_ERROR_KEY_NOT_FOUND) {
            seaf_message ("Error when reading WEBDAV.debug, use deafult value FALSE\n");
        }
        ctl->seafdav_config.debug_mode = FALSE;
        g_clear_error (&error);
    }

    /* Reject out-of-range ports before anything tries to bind them. */
    if (ctl->seafdav_config.port <= 0 || ctl->seafdav_config.port > 65535) {
        seaf_warning("Failed to load seafdav config: invalid port %d\n", ctl->seafdav_config.port);
        ret = -1;
        goto out;
    }

out:
    if (key_file) {
        g_key_file_free (key_file);
    }
    g_free (seafdav_conf);
    return ret;
}
/* Load seafile.conf and apply its syslog settings.
 * Returns 0 on success, -1 if the config cannot be loaded. */
static int
init_syslog_config ()
{
    int rc = 0;
    char *conf_path = g_build_filename (ctl->central_config_dir, "seafile.conf", NULL);
    GKeyFile *kf = g_key_file_new ();

    if (g_key_file_load_from_file (kf, conf_path,
                                   G_KEY_FILE_KEEP_COMMENTS, NULL)) {
        set_syslog_config (kf);
    } else {
        seaf_warning("Failed to load seafile.conf.\n");
        rc = -1;
    }

    g_key_file_free (kf);
    g_free (conf_path);
    return rc;
}
/* Entry point: parse options, initialize the controller, start the
 * managed daemons, optionally daemonize, write the pidfile, then run
 * the main loop forever. */
int main (int argc, char **argv)
{
    if (argc <= 1) {
        usage ();
        exit (1);
    }

    char *config_dir = DEFAULT_CONFIG_DIR;
    char *central_config_dir = NULL;
    char *seafile_dir = NULL;
    char *logdir = NULL;
    char *ccnet_debug_level_str = "info";
    char *seafile_debug_level_str = "debug";
    int daemon_mode = 1;
    gboolean test_conf = FALSE;

    int c;
    while ((c = getopt_long (argc, argv, short_opts,
                             long_opts, NULL)) != EOF)
    {
        switch (c) {
        case 'h':
            usage ();
            exit(1);
            break;
        case 'v':
            fprintf (stderr, "seafile-controller version 1.0\n");
            exit(1);
            break;
        case 't':
            test_conf = TRUE;
            break;
        case 'c':
            config_dir = optarg;
            break;
        case 'F':
            central_config_dir = g_strdup(optarg);
            break;
        case 'd':
            seafile_dir = g_strdup(optarg);
            break;
        case 'f':
            daemon_mode = 0;
            break;
        case 'l':
            /* BUGFIX: this was `case 'L'`, which getopt_long never
             * returns — short_opts declares "l:" and the --logdir long
             * option maps to 'l' — so -l/--logdir used to fall into
             * `default` and exit with a usage message. */
            logdir = g_strdup(optarg);
            break;
        case 'g':
            ccnet_debug_level_str = optarg;
            break;
        case 'G':
            seafile_debug_level_str = optarg;
            break;
        case 'P':
            controller_pidfile = optarg;
            break;
        default:
            usage ();
            exit (1);
        }
    }

#if !GLIB_CHECK_VERSION(2, 35, 0)
    g_type_init();
#endif
#if !GLIB_CHECK_VERSION(2,32,0)
    g_thread_init (NULL);
#endif

    if (!seafile_dir) {
        fprintf (stderr, " must be specified with --seafile-dir\n");
        exit(1);
    }

    if (!central_config_dir) {
        fprintf (stderr, " must be specified with --central-config-dir\n");
        exit(1);
    }

    central_config_dir = ccnet_expand_path (central_config_dir);
    config_dir = ccnet_expand_path (config_dir);
    seafile_dir = ccnet_expand_path (seafile_dir);

    /* -t: validate the config via "seaf-server -t" and exit. */
    if (test_conf) {
        test_config (central_config_dir, config_dir, seafile_dir);
    }

    ctl = g_new0 (SeafileController, 1);
    if (seaf_controller_init (ctl, central_config_dir, config_dir, seafile_dir, logdir) < 0) {
        controller_exit(1);
    }

    char *logfile = g_build_filename (ctl->logdir, "controller.log", NULL);
    if (seafile_log_init (logfile, ccnet_debug_level_str,
                          seafile_debug_level_str, "seafile-controller") < 0) {
        fprintf (stderr, "Failed to init log.\n");
        controller_exit (1);
    }

    if (init_syslog_config () < 0) {
        controller_exit (1);
    }

    set_signal_handlers ();

    enabled_go_fileserver = should_start_go_fileserver();

    if (seaf_controller_start () < 0)
        controller_exit (1);

    /* SEAFILE_LOG_TO_STDOUT=true (e.g. in containers) forces foreground. */
    const char *log_to_stdout_env = g_getenv("SEAFILE_LOG_TO_STDOUT");
    if (g_strcmp0(log_to_stdout_env, "true") == 0) {
        daemon_mode = 0;
    }

#ifndef WIN32
    if (daemon_mode) {
#ifndef __APPLE__
        daemon (1, 0);
#else /* __APPLE */
        /* daemon is deprecated under APPLE
         * use fork() instead
         * */
        switch (fork ()) {
        case -1:
            seaf_warning ("Failed to daemonize");
            exit (-1);
            break;
        case 0:
            /* all good*/
            break;
        default:
            /* kill origin process */
            exit (0);
        }
#endif /* __APPLE */
    }
#endif /* !WIN32 */

    if (controller_pidfile == NULL) {
        controller_pidfile = g_strdup(g_getenv ("SEAFILE_PIDFILE"));
    }

    if (controller_pidfile != NULL) {
        if (write_controller_pidfile () < 0) {
            seaf_warning ("Failed to write pidfile %s\n", controller_pidfile);
            return -1;
        }
    }

    run_controller_loop ();

    return 0;
}
================================================
FILE: controller/seafile-controller.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/*
* Seafile-controller is responsible for:
*
* 1. Start: start server processes:
*
* - ccnet-server
* - seaf-server
* - seaf-mon
*
* 2. Repair:
*
* - ensure ccnet process availability by watching client->connfd
* - ensure server processes availablity by checking process is running periodically
* If some process has stopped working, try to restart it.
*
*/
#ifndef SEAFILE_CONTROLLER_H
#define SEAFILE_CONTROLLER_H
typedef struct _SeafileController SeafileController;
enum {
PID_CCNET = 0,
PID_SERVER,
PID_FILESERVER,
PID_SEAFDAV,
PID_SEAFEVENTS,
N_PID
};
typedef struct SeafDavConfig {
gboolean enabled;
int port;
char *host;
gboolean debug_mode;
} SeafDavConfig;
struct _SeafileController {
char *central_config_dir;
char *config_dir;
char *seafile_dir;
char *rpc_pipe_path;
char *logdir;
guint check_process_timer;
guint client_io_id;
/* Decide whether to start seaf-server in cloud mode */
gboolean cloud_mode;
int pid[N_PID];
char *pidfile[N_PID];
SeafDavConfig seafdav_config;
gboolean has_seafevents;
};
#endif
================================================
FILE: doc/Makefile.am
================================================
# Ship the tutorial document in release tarballs.
EXTRA_DIST = seafile-tutorial.doc
================================================
FILE: fileserver/.golangci.yml
================================================
# golangci-lint configuration for the fileserver module.
run:
  timeout: 2m

linters:
  enable:
    - govet
    - gosimple
    - ineffassign
    - staticcheck
    - unused
    - gofmt
  disable:
    # errcheck is explicitly disabled here.
    - errcheck
================================================
FILE: fileserver/blockmgr/blockmgr.go
================================================
// Package blockmgr provides operations on blocks
package blockmgr
import (
"github.com/haiwen/seafile-server/fileserver/objstore"
"io"
)
var store *objstore.ObjectStore
// Init initializes block manager and creates underlying object store.
// It must be called before any other function in this package, since
// they all operate on the package-level store.
func Init(seafileConfPath string, seafileDataDir string) {
	store = objstore.New(seafileConfPath, seafileDataDir, "blocks")
}
// Read reads block from storage backend, streaming its content into w.
func Read(repoID string, blockID string, w io.Writer) error {
	// The previous "if err != nil { return err }; return nil" wrapper
	// collapses to returning the call result directly (gosimple).
	return store.Read(repoID, blockID, w)
}
// Write writes block to storage backend, consuming the content of r.
func Write(repoID string, blockID string, r io.Reader) error {
	// Return the store result directly instead of re-checking err
	// (gosimple-flagged pattern).
	return store.Write(repoID, blockID, r, false)
}
// Exists checks block if exists.
// NOTE: the underlying store error is deliberately discarded — a lookup
// failure is reported the same as "does not exist".
func Exists(repoID string, blockID string) bool {
	ret, _ := store.Exists(repoID, blockID)
	return ret
}
// Stat calculates block size.
func Stat(repoID string, blockID string) (int64, error) {
	// Return the result pair directly; the named intermediates added
	// nothing (gosimple-flagged pattern).
	return store.Stat(repoID, blockID)
}
================================================
FILE: fileserver/blockmgr/blockmgr_test.go
================================================
package blockmgr
import (
"bytes"
"fmt"
"os"
"path"
"testing"
)
const (
blockID = "0401fc662e3bc87a41f299a907c056aaf8322a27"
repoID = "b1f2ad61-9164-418a-a47f-ab805dbd5694"
seafileConfPath = "/tmp/conf"
seafileDataDir = "/tmp/conf/seafile-data"
testFile = "output.data"
)
// delFile removes the test data file and the temporary seafile
// configuration tree created by the test run.
func delFile() error {
	if err := os.Remove(testFile); err != nil {
		return err
	}
	// RemoveAll wipes the whole conf directory, including seafile-data.
	return os.RemoveAll(seafileConfPath)
}
// createFile creates the test input file and fills it with ten copies of
// "hello world!\n" (130 bytes total, which testBlockExists relies on).
func createFile() error {
	outputFile, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
	if err != nil {
		return err
	}
	defer outputFile.Close()
	outputString := "hello world!\n"
	for i := 0; i < 10; i++ {
		// The write error was previously ignored; a short/failed write
		// would silently produce a wrong-sized fixture.
		if _, err := outputFile.WriteString(outputString); err != nil {
			return err
		}
	}
	return nil
}
// TestMain sets up the test fixture file, runs the package tests, and
// tears the fixture down afterwards. Exit code 1 signals a fixture
// failure; otherwise the tests' own exit code is propagated.
func TestMain(m *testing.M) {
	err := createFile()
	if err != nil {
		fmt.Printf("Failed to create test file : %v\n", err)
		os.Exit(1)
	}
	code := m.Run()
	// Cleanup happens even when tests failed; os.Exit skips deferred
	// calls, so cleanup is done inline before exiting.
	err = delFile()
	if err != nil {
		fmt.Printf("Failed to remove test file : %v\n", err)
		os.Exit(1)
	}
	os.Exit(code)
}
// testBlockRead verifies that the block written earlier can be read back
// without error.
func testBlockRead(t *testing.T) {
	var out bytes.Buffer
	if err := Read(repoID, blockID, &out); err != nil {
		t.Errorf("Failed to read block.\n")
	}
}
// testBlockWrite writes the fixture file's content into the block store.
func testBlockWrite(t *testing.T) {
	inputFile, err := os.Open(testFile)
	if err != nil {
		// Fatalf, not Errorf: Errorf lets the function continue with a
		// nil *os.File, and Write would then dereference it.
		t.Fatalf("Failed to open test file : %v\n", err)
	}
	defer inputFile.Close()
	if err := Write(repoID, blockID, inputFile); err != nil {
		t.Errorf("Failed to write block.\n")
	}
}
// testBlockExists checks that the written block is reported as existing
// and that the on-disk block file has the expected size (10 x 13 bytes).
func testBlockExists(t *testing.T) {
	if !Exists(repoID, blockID) {
		t.Errorf("Block is not exist\n")
	}
	// Blocks are sharded on disk by the first two hex chars of the ID.
	filePath := path.Join(seafileDataDir, "storage", "blocks", repoID, blockID[:2], blockID[2:])
	fileInfo, err := os.Stat(filePath)
	if err != nil {
		// The stat error was previously discarded, so a missing file
		// made fileInfo nil and fileInfo.Size() panicked.
		t.Fatalf("Failed to stat block file : %v\n", err)
	}
	if fileInfo.Size() != 130 {
		t.Errorf("Block is exist, but the size of file is incorrect.\n")
	}
}
// TestBlock drives the block manager round-trip: write, read back, then
// verify existence and size. Order matters — read and exists depend on
// the preceding write.
func TestBlock(t *testing.T) {
	Init(seafileConfPath, seafileDataDir)
	testBlockWrite(t)
	testBlockRead(t)
	testBlockExists(t)
}
================================================
FILE: fileserver/commitmgr/commitmgr.go
================================================
// Package commitmgr manages commit objects.
package commitmgr
import (
"bytes"
"crypto/sha1"
"encoding/binary"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"time"
"github.com/haiwen/seafile-server/fileserver/objstore"
"github.com/haiwen/seafile-server/fileserver/utils"
)
// Commit is a commit object.
// It is serialized as JSON when stored in the commit object store; the
// json tags must therefore stay in sync with the C server's commit
// format — do not rename them.
type Commit struct {
	CommitID    string `json:"commit_id"`
	RepoID      string `json:"repo_id"`
	RootID      string `json:"root_id"`
	CreatorName string `json:"creator_name,omitempty"`
	CreatorID   string `json:"creator"`
	Desc        string `json:"description"`
	Ctime       int64  `json:"ctime"`
	// Parent pointers use the nullable String type (see null.go) so a
	// missing parent marshals to JSON null rather than "".
	ParentID       String `json:"parent_id"`
	SecondParentID String `json:"second_parent_id"`
	RepoName       string `json:"repo_name"`
	RepoDesc       string `json:"repo_desc"`
	RepoCategory   string `json:"repo_category"`
	DeviceName     string `json:"device_name,omitempty"`
	ClientVersion  string `json:"client_version,omitempty"`
	// Encryption-related fields; only present for encrypted repos.
	Encrypted     string `json:"encrypted,omitempty"`
	EncVersion    int    `json:"enc_version,omitempty"`
	Magic         string `json:"magic,omitempty"`
	RandomKey     string `json:"key,omitempty"`
	Salt          string `json:"salt,omitempty"`
	PwdHash       string `json:"pwd_hash,omitempty"`
	PwdHashAlgo   string `json:"pwd_hash_algo,omitempty"`
	PwdHashParams string `json:"pwd_hash_params,omitempty"`
	Version       int    `json:"version,omitempty"`
	Conflict      int    `json:"conflict,omitempty"`
	NewMerge      int    `json:"new_merge,omitempty"`
	Repaired      int    `json:"repaired,omitempty"`
}
var store *objstore.ObjectStore
// Init initializes commit manager and creates underlying object store.
// Must be called before Load/Save/ReadRaw/WriteRaw/Exists.
func Init(seafileConfPath string, seafileDataDir string) {
	// Commits live in the "commits" namespace of the object store.
	store = objstore.New(seafileConfPath, seafileDataDir, "commits")
}
// NewCommit builds a Commit for the given repo with the supplied root,
// creator and description, stamps it with the current time, and derives
// its commit ID. parentID may be empty for a root commit, in which case
// ParentID is left null.
func NewCommit(repoID, parentID, newRoot, user, desc string) *Commit {
	commit := &Commit{
		RepoID:      repoID,
		RootID:      newRoot,
		Desc:        desc,
		CreatorName: user,
		CreatorID:   "0000000000000000000000000000000000000000",
		Ctime:       time.Now().Unix(),
	}
	// The ID is derived from root/creator/description/ctime; the parent
	// pointer is not part of the hash, so it is safe to set afterwards.
	commit.CommitID = computeCommitID(commit)
	if parentID != "" {
		commit.ParentID.SetValid(parentID)
	}
	return commit
}
// computeCommitID derives the hex-encoded SHA-1 commit ID from the
// commit's root ID, creator, description and creation time.
func computeCommitID(commit *Commit) string {
	h := sha1.New()
	h.Write([]byte(commit.RootID))
	h.Write([]byte(commit.CreatorID))
	h.Write([]byte(commit.CreatorName))
	h.Write([]byte(commit.Desc))
	// Ctime is hashed as a big-endian 64-bit value so the ID is stable
	// across platforms.
	var ts [8]byte
	binary.BigEndian.PutUint64(ts[:], uint64(commit.Ctime))
	h.Write(ts[:])
	return hex.EncodeToString(h.Sum(nil))
}
// FromData reads from p and converts JSON-encoded data to commit.
// After unmarshalling it validates the structural invariants: repo ID
// must be a UUID, root/parent IDs must be valid object IDs, and the
// creator ID must be a 40-char SHA-1 hex string. Any violation yields
// an error and the commit must not be used.
func (commit *Commit) FromData(p []byte) error {
	err := json.Unmarshal(p, commit)
	if err != nil {
		return err
	}
	if !utils.IsValidUUID(commit.RepoID) {
		return fmt.Errorf("repo id %s is invalid", commit.RepoID)
	}
	if !utils.IsObjectIDValid(commit.RootID) {
		return fmt.Errorf("root id %s is invalid", commit.RootID)
	}
	if len(commit.CreatorID) != 40 {
		return fmt.Errorf("creator id %s is invalid", commit.CreatorID)
	}
	// Parent IDs are nullable; only validate them when present.
	if commit.ParentID.Valid && !utils.IsObjectIDValid(commit.ParentID.String) {
		return fmt.Errorf("parent id %s is invalid", commit.ParentID.String)
	}
	if commit.SecondParentID.Valid && !utils.IsObjectIDValid(commit.SecondParentID.String) {
		return fmt.Errorf("second parent id %s is invalid", commit.SecondParentID.String)
	}
	return nil
}
// ToData converts commit to JSON-encoded data and writes it to w.
// Returns a marshalling error or the writer's error, if any.
func (commit *Commit) ToData(w io.Writer) error {
	jsonstr, err := json.Marshal(commit)
	if err != nil {
		return err
	}
	// Return the write error directly; the extra nil check added nothing.
	_, err = w.Write(jsonstr)
	return err
}
// ReadRaw reads the raw (undecoded) commit object repoID/commitID from
// the storage backend into w. Returns the store's error unchanged.
func ReadRaw(repoID string, commitID string, w io.Writer) error {
	// Propagate the store error directly instead of re-checking it.
	return store.Read(repoID, commitID, w)
}
// WriteRaw writes raw commit data read from r to the storage backend
// under repoID/commitID, uncompressed. Returns the store's error
// unchanged.
func WriteRaw(repoID string, commitID string, r io.Reader) error {
	// Propagate the store error directly instead of re-checking it.
	return store.Write(repoID, commitID, r, false)
}
// Load reads the commit repoID/commitID from the storage backend and
// decodes it. Returns an error if the object is missing or fails the
// validation performed by FromData.
func Load(repoID string, commitID string) (*Commit, error) {
	var raw bytes.Buffer
	if err := ReadRaw(repoID, commitID, &raw); err != nil {
		return nil, err
	}
	commit := new(Commit)
	if err := commit.FromData(raw.Bytes()); err != nil {
		return nil, err
	}
	return commit, nil
}
// Save serializes commit to JSON and writes it to the storage backend
// under commit.RepoID/commit.CommitID.
func Save(commit *Commit) error {
	var buf bytes.Buffer
	if err := commit.ToData(&buf); err != nil {
		return err
	}
	// Return the write result directly; the original fell through to
	// `return err` with err known to be nil.
	return WriteRaw(commit.RepoID, commit.CommitID, &buf)
}
// Exists checks commit if exists.
// Unlike blockmgr.Exists, this returns the store's lookup error to the
// caller instead of swallowing it.
func Exists(repoID string, commitID string) (bool, error) {
	return store.Exists(repoID, commitID)
}
================================================
FILE: fileserver/commitmgr/commitmgr_test.go
================================================
package commitmgr
import (
"fmt"
"os"
"testing"
"time"
)
const (
commitID = "0401fc662e3bc87a41f299a907c056aaf8322a27"
rootID = "6a1608dc2a1248838464e9b194800d35252e2ce3"
repoID = "b1f2ad61-9164-418a-a47f-ab805dbd5694"
seafileConfPath = "/tmp/conf"
seafileDataDir = "/tmp/conf/seafile-data"
)
// delFile removes the temporary seafile configuration tree used by the
// commit manager tests.
func delFile() error {
	return os.RemoveAll(seafileConfPath)
}
// TestMain runs the package tests and then removes the temporary data
// directory, propagating the tests' exit code.
func TestMain(m *testing.M) {
	code := m.Run()
	// Cleanup is done inline (not deferred) because os.Exit skips defers.
	err := delFile()
	if err != nil {
		fmt.Printf("Failed to remove test file : %v\n", err)
		os.Exit(1)
	}
	os.Exit(code)
}
// assertEqual fails the test when a != b, reporting both values.
func assertEqual(t *testing.T, a, b interface{}) {
	if a != b {
		// %v, not %t: %t is the boolean verb and rendered the string
		// arguments as "%!t(string=...)" in failure messages.
		t.Errorf("Not Equal.%v,%v", a, b)
	}
}
// TestCommit round-trips a commit through Save and Load and verifies the
// fields survive JSON serialization, including the nullable ParentID.
func TestCommit(t *testing.T) {
	Init(seafileConfPath, seafileDataDir)
	newCommit := new(Commit)
	newCommit.CommitID = commitID
	newCommit.RepoID = repoID
	newCommit.RootID = rootID
	newCommit.CreatorName = "seafile"
	// CreatorID must be a 40-char hex string for FromData validation to
	// pass; the commitID constant happens to satisfy that.
	newCommit.CreatorID = commitID
	newCommit.Desc = "This is a commit"
	newCommit.Ctime = time.Now().Unix()
	newCommit.ParentID.SetValid(commitID)
	newCommit.DeviceName = "Linux"
	err := Save(newCommit)
	if err != nil {
		t.Errorf("Failed to save commit.\n")
	}
	commit, err := Load(repoID, commitID)
	if err != nil {
		t.Errorf("Failed to load commit: %v.\n", err)
	}
	assertEqual(t, commit.CommitID, commitID)
	assertEqual(t, commit.RepoID, repoID)
	assertEqual(t, commit.CreatorName, "seafile")
	assertEqual(t, commit.CreatorID, commitID)
	assertEqual(t, commit.ParentID.String, commitID)
}
================================================
FILE: fileserver/commitmgr/null.go
================================================
package commitmgr
import (
"bytes"
"database/sql"
"encoding/json"
"fmt"
)
// nullBytes is the canonical JSON encoding of null.
var nullBytes = []byte("null")

// String is a nullable string built on sql.NullString, usable both as a
// SQL scan target and as a JSON field. A null String marshals to JSON
// null; JSON null unmarshals to a null String.
type String struct {
	sql.NullString
}

// StringFrom wraps a plain string in a valid (non-null) String.
func StringFrom(s string) String {
	return NewString(s, true)
}

// StringFromPtr builds a String from a pointer; nil yields a null String.
func StringFromPtr(s *string) String {
	if s != nil {
		return NewString(*s, true)
	}
	return NewString("", false)
}

// ValueOrZero unwraps the value, substituting "" when the String is null.
func (s String) ValueOrZero() string {
	if s.Valid {
		return s.String
	}
	return ""
}

// NewString constructs a String with an explicit validity flag.
func NewString(s string, valid bool) String {
	return String{
		NullString: sql.NullString{String: s, Valid: valid},
	}
}

// UnmarshalJSON implements json.Unmarshaler. JSON null produces a null
// String; any JSON string — including "" — produces a valid one.
func (s *String) UnmarshalJSON(data []byte) error {
	if bytes.Equal(data, nullBytes) {
		s.Valid = false
		return nil
	}
	if err := json.Unmarshal(data, &s.String); err != nil {
		return fmt.Errorf("null: couldn't unmarshal JSON: %w", err)
	}
	s.Valid = true
	return nil
}

// MarshalJSON implements json.Marshaler; null Strings encode as null.
func (s String) MarshalJSON() ([]byte, error) {
	if s.Valid {
		return json.Marshal(s.String)
	}
	return []byte("null"), nil
}

// MarshalText implements encoding.TextMarshaler; a null String encodes
// as an empty byte slice.
func (s String) MarshalText() ([]byte, error) {
	if s.Valid {
		return []byte(s.String), nil
	}
	return []byte{}, nil
}

// UnmarshalText implements encoding.TextUnmarshaler; blank input yields
// a null String (note the asymmetry with UnmarshalJSON).
func (s *String) UnmarshalText(text []byte) error {
	s.String = string(text)
	s.Valid = s.String != ""
	return nil
}

// SetValid stores v and marks the String as non-null.
func (s *String) SetValid(v string) {
	s.String = v
	s.Valid = true
}

// Ptr returns a pointer to the value, or nil when the String is null.
func (s String) Ptr() *string {
	if s.Valid {
		return &s.String
	}
	return nil
}

// IsZero reports whether the String is null (for omitempty-style checks).
func (s String) IsZero() bool {
	return !s.Valid
}

// Equal reports whether both Strings are null, or both valid with the
// same value.
func (s String) Equal(other String) bool {
	if s.Valid != other.Valid {
		return false
	}
	return !s.Valid || s.String == other.String
}
================================================
FILE: fileserver/crypt.go
================================================
package main
import (
"bytes"
"crypto/aes"
"crypto/cipher"
)
// seafileCrypt holds the key material for one encrypted repo.
// version selects the scheme: version 3 uses AES-128-ECB with the key
// truncated/padded to 16 bytes and ignores iv; other versions use
// AES-CBC with the stored iv.
type seafileCrypt struct {
	key     []byte
	iv      []byte
	version int
}
// encrypt PKCS#7-pads input and encrypts it with the repo's key.
// Version-3 repos use ECB (each block independent, iv unused); all
// other versions use CBC with crypt.iv.
func (crypt *seafileCrypt) encrypt(input []byte) ([]byte, error) {
	key := crypt.key
	if crypt.version == 3 {
		// v3 keys are forced to 16 bytes (AES-128).
		key = to16Bytes(key)
	}
	cipherBlock, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	blockSize := cipherBlock.BlockSize()
	padded := pkcs7Padding(input, blockSize)
	out := make([]byte, len(padded))
	if crypt.version == 3 {
		for start := 0; start < len(padded); start += blockSize {
			cipherBlock.Encrypt(out[start:start+blockSize], padded[start:start+blockSize])
		}
		return out, nil
	}
	cipher.NewCBCEncrypter(cipherBlock, crypt.iv).CryptBlocks(out, padded)
	return out, nil
}
// decrypt reverses encrypt: ECB for version-3 repos, CBC otherwise,
// then strips PKCS#7 padding. input length must be a multiple of the
// AES block size.
func (crypt *seafileCrypt) decrypt(input []byte) ([]byte, error) {
	key := crypt.key
	if crypt.version == 3 {
		key = to16Bytes(key)
	}
	cipherBlock, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	blockSize := cipherBlock.BlockSize()
	out := make([]byte, len(input))
	if crypt.version == 3 {
		// Encryption repo v3 uses AES_128_ecb mode: each block is
		// decrypted independently, so no iv is involved.
		for start := 0; start < len(input); start += blockSize {
			cipherBlock.Decrypt(out[start:start+blockSize], input[start:start+blockSize])
		}
		return pkcs7UnPadding(out), nil
	}
	cipher.NewCBCDecrypter(cipherBlock, crypt.iv).CryptBlocks(out, input)
	return pkcs7UnPadding(out), nil
}
// pkcs7Padding appends PKCS#7 padding so len(result) is a multiple of
// blockSize. When the input is already aligned, a full extra block of
// padding is added, as the standard requires.
func pkcs7Padding(p []byte, blockSize int) []byte {
	padLen := blockSize - len(p)%blockSize
	pad := bytes.Repeat([]byte{byte(padLen)}, padLen)
	return append(p, pad...)
}
// pkcs7UnPadding strips PKCS#7 padding from p.
// The original implementation panicked on empty input (p[length-1] out
// of range) and on corrupt padding whose declared length exceeded the
// buffer (negative slice bound). Both now return the input unchanged
// instead of crashing the caller on malformed ciphertext.
func pkcs7UnPadding(p []byte) []byte {
	length := len(p)
	if length == 0 {
		return p
	}
	paddLen := int(p[length-1])
	if paddLen > length {
		// Invalid padding byte — do not slice out of range.
		return p
	}
	return p[:(length - paddLen)]
}
// to16Bytes normalizes input to exactly 16 bytes: longer keys are
// truncated, shorter ones are zero-padded at the tail.
func to16Bytes(input []byte) []byte {
	normalized := make([]byte, 16)
	copy(normalized, input)
	return normalized
}
================================================
FILE: fileserver/diff/diff.go
================================================
package diff
import (
"context"
"fmt"
"io"
"path/filepath"
"strings"
"github.com/haiwen/seafile-server/fileserver/commitmgr"
"github.com/haiwen/seafile-server/fileserver/fsmgr"
"github.com/haiwen/seafile-server/fileserver/repomgr"
)
// Empty value of sha1
const (
	EmptySha1 = "0000000000000000000000000000000000000000"
)

// fileCB is invoked for every set of file entries that differ between
// the trees being walked; dirCB likewise for directory entries, and may
// set its *bool argument to false to stop descending into that subtree.
type fileCB func(context.Context, string, []*fsmgr.SeafDirent, interface{}) error
type dirCB func(context.Context, string, []*fsmgr.SeafDirent, interface{}, *bool) error

// DiffOptions bundles the callbacks and state for one tree diff.
type DiffOptions struct {
	FileCB fileCB
	DirCB  dirCB
	RepoID string
	Ctx    context.Context
	// Data is passed through verbatim to the callbacks.
	Data interface{}
	// Reader is the pooled zlib reader used for all fs object loads in
	// this diff; set by DiffTrees.
	Reader io.ReadCloser
}

// diffData is the callback payload used by the two-/three-way diffs.
type diffData struct {
	foldDirDiff bool
	results     *[]*DiffEntry
}
// DiffTrees diffs 2 (two-way) or 3 (three-way) commit root trees,
// invoking opt's callbacks for every differing entry. Any other number
// of roots is rejected.
func DiffTrees(roots []string, opt *DiffOptions) error {
	zr := fsmgr.GetOneZlibReader()
	defer fsmgr.ReturnOneZlibReader(zr)
	opt.Reader = zr
	if len(roots) != 2 && len(roots) != 3 {
		return fmt.Errorf("the number of commit trees is illegal")
	}
	trees := make([]*fsmgr.SeafDir, len(roots))
	for i, rootID := range roots {
		tree, err := fsmgr.GetSeafdirWithZlibReader(opt.RepoID, rootID, opt.Reader)
		if err != nil {
			return fmt.Errorf("Failed to find dir %s:%s", opt.RepoID, rootID)
		}
		trees[i] = tree
	}
	return diffTreesRecursive(trees, "", opt)
}
// diffTreesRecursive walks 2 or 3 directory trees in lockstep, calling
// the file/dir callbacks for every name at which the trees disagree.
// NOTE(review): the walk selects the lexicographically LARGEST pending
// name each round, which presumes every tree's Entries are sorted in
// descending name order (matching Dirents.Less in fileop.go) — confirm
// that invariant holds for all fs objects.
func diffTreesRecursive(trees []*fsmgr.SeafDir, baseDir string, opt *DiffOptions) error {
	n := len(trees)
	// ptrs[i] is the remaining (unconsumed) entry slice of tree i;
	// offset[i] indexes the next entry to look at.
	ptrs := make([][]*fsmgr.SeafDirent, 3)
	for i := 0; i < n; i++ {
		if trees[i] != nil {
			ptrs[i] = trees[i].Entries
		} else {
			ptrs[i] = nil
		}
	}
	var firstName string
	var done bool
	var offset = make([]int, n)
	for {
		dents := make([]*fsmgr.SeafDirent, 3)
		firstName = ""
		done = true
		// Pass 1: find the largest name among the heads of all trees.
		for i := 0; i < n; i++ {
			if len(ptrs[i]) > offset[i] {
				done = false
				dent := ptrs[i][offset[i]]
				if firstName == "" {
					firstName = dent.Name
				} else if strings.Compare(dent.Name, firstName) > 0 {
					firstName = dent.Name
				}
			}
		}
		if done {
			break
		}
		// Pass 2: consume that name from every tree that has it, so
		// dents[i] is non-nil exactly where tree i contains firstName.
		for i := 0; i < n; i++ {
			if len(ptrs[i]) > offset[i] {
				dent := ptrs[i][offset[i]]
				if firstName == dent.Name {
					dents[i] = dent
					offset[i]++
				}
			}
		}
		// Fast path: skip entries identical across all trees.
		if n == 2 && dents[0] != nil && dents[1] != nil &&
			direntSame(dents[0], dents[1]) {
			continue
		}
		if n == 3 && dents[0] != nil && dents[1] != nil &&
			dents[2] != nil && direntSame(dents[0], dents[1]) &&
			direntSame(dents[0], dents[2]) {
			continue
		}
		// Report differences; an entry may be both reported as a file
		// in one tree and recursed into as a directory in another.
		if err := diffFiles(baseDir, dents, opt); err != nil {
			return err
		}
		if err := diffDirectories(baseDir, dents, opt); err != nil {
			return err
		}
	}
	return nil
}
// diffFiles forwards the regular-file entries at the current name to the
// file callback; it is a no-op when none of the trees has a regular file
// there.
func diffFiles(baseDir string, dents []*fsmgr.SeafDirent, opt *DiffOptions) error {
	files := make([]*fsmgr.SeafDirent, 3)
	fileCount := 0
	for idx, dent := range dents {
		if dent != nil && fsmgr.IsRegular(dent.Mode) {
			files[idx] = dent
			fileCount++
		}
	}
	if fileCount == 0 {
		return nil
	}
	return opt.FileCB(opt.Ctx, baseDir, files, opt.Data)
}
// diffDirectories forwards the directory entries at the current name to
// the dir callback and, unless the callback vetoes it, loads the
// subdirectories and recurses into them.
func diffDirectories(baseDir string, dents []*fsmgr.SeafDirent, opt *DiffOptions) error {
	dirs := make([]*fsmgr.SeafDirent, 3)
	subDirs := make([]*fsmgr.SeafDir, 3)
	dirCount := 0
	for idx, dent := range dents {
		if dent != nil && fsmgr.IsDir(dent.Mode) {
			dirs[idx] = dent
			dirCount++
		}
	}
	if dirCount == 0 {
		return nil
	}
	// The callback may clear recurse to prune this subtree.
	recurse := true
	if err := opt.DirCB(opt.Ctx, baseDir, dirs, opt.Data, &recurse); err != nil {
		return fmt.Errorf("failed to call dir callback: %w", err)
	}
	if !recurse {
		return nil
	}
	var dirName string
	for idx, dent := range dents {
		if dent == nil || !fsmgr.IsDir(dent.Mode) {
			continue
		}
		sub, err := fsmgr.GetSeafdirWithZlibReader(opt.RepoID, dent.ID, opt.Reader)
		if err != nil {
			return fmt.Errorf("Failed to find dir %s:%s", opt.RepoID, dent.ID)
		}
		subDirs[idx] = sub
		dirName = dent.Name
	}
	return diffTreesRecursive(subDirs, baseDir+dirName+"/", opt)
}
// direntSame reports whether two dirents are identical in content (ID),
// permissions/type (Mode) and modification time.
func direntSame(dentA, dentB *fsmgr.SeafDirent) bool {
	if dentA.ID != dentB.ID {
		return false
	}
	if dentA.Mode != dentB.Mode {
		return false
	}
	return dentA.Mtime == dentB.Mtime
}
// Diff type and diff status.
// Status codes match the C server's diff output; note that 'C' is reused
// both as the diff type (commits) and the dir-deleted status.
const (
	DiffTypeCommits      = 'C' /* diff between two commits*/
	DiffStatusAdded      = 'A'
	DiffStatusDeleted    = 'D'
	DiffStatusModified   = 'M'
	DiffStatusRenamed    = 'R'
	DiffStatusUnmerged   = 'U'
	DiffStatusDirAdded   = 'B'
	DiffStatusDirDeleted = 'C'
	DiffStatusDirRenamed = 'E'
)

// DiffEntry describes one changed file or directory in a diff result.
type DiffEntry struct {
	DiffType rune
	Status   rune
	Sha1     string
	Name     string
	// NewName is set only for rename entries.
	NewName string
	Size    int64
	// OriginSize is the pre-modification size (set for modified files).
	OriginSize int64
}
// diffEntryNewFromDirent builds a DiffEntry from a dirent, joining the
// entry name onto baseDir.
func diffEntryNewFromDirent(diffType, status rune, dent *fsmgr.SeafDirent, baseDir string) *DiffEntry {
	return &DiffEntry{
		DiffType: diffType,
		Status:   status,
		Sha1:     dent.ID,
		Size:     dent.Size,
		Name:     filepath.Join(baseDir, dent.Name),
	}
}
// diffEntryNew builds a DiffEntry directly from an object ID and a
// ready-made path (no joining, no size).
func diffEntryNew(diffType, status rune, dirID, name string) *DiffEntry {
	return &DiffEntry{
		DiffType: diffType,
		Status:   status,
		Sha1:     dirID,
		Name:     name,
	}
}
// DiffMergeRoots three-way-diffs a merged root against its two parents
// and appends the changes (with renames resolved) to results.
func DiffMergeRoots(storeID, mergedRoot, p1Root, p2Root string, results *[]*DiffEntry, foldDirDiff bool) error {
	opt := &DiffOptions{
		RepoID: storeID,
		FileCB: threewayDiffFiles,
		DirCB:  threewayDiffDirs,
		Data:   diffData{foldDirDiff, results},
	}
	if err := DiffTrees([]string{mergedRoot, p1Root, p2Root}, opt); err != nil {
		return fmt.Errorf("failed to diff trees: %v", err)
	}
	diffResolveRenames(results)
	return nil
}
// threewayDiffFiles classifies a file across merged commit (m) and its
// two parents (p1, p2). The case analysis mirrors the C server: a file
// is Modified when the merge result differs from every surviving
// parent, Deleted when it vanished from the merge, Added when neither
// parent had it.
func threewayDiffFiles(ctx context.Context, baseDir string, dents []*fsmgr.SeafDirent, optData interface{}) error {
	m := dents[0]
	p1 := dents[1]
	p2 := dents[2]
	data, ok := optData.(diffData)
	if !ok {
		err := fmt.Errorf("failed to assert diff data")
		return err
	}
	results := data.results
	if m != nil && p1 != nil && p2 != nil {
		// Present everywhere: modified only if it matches neither parent.
		if !direntSame(m, p1) && !direntSame(m, p2) {
			de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusModified, m, baseDir)
			*results = append(*results, de)
		}
	} else if m == nil && p1 != nil && p2 != nil {
		// Gone from the merge result: deleted (reported via p1's dirent).
		de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusDeleted, p1, baseDir)
		*results = append(*results, de)
	} else if m != nil && p1 == nil && p2 != nil {
		// Only one parent had it: modified if it differs from that parent.
		if !direntSame(m, p2) {
			de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusModified, m, baseDir)
			*results = append(*results, de)
		}
	} else if m != nil && p1 != nil && p2 == nil {
		if !direntSame(m, p1) {
			de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusModified, m, baseDir)
			*results = append(*results, de)
		}
	} else if m != nil && p1 == nil && p2 == nil {
		// In neither parent: a genuinely new file.
		de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusAdded, m, baseDir)
		*results = append(*results, de)
	}
	// Remaining combination (m, p1, p2 all nil at a name) cannot occur.
	return nil
}
// threewayDiffDirs is the directory callback for three-way diffs: it
// records nothing at the directory level and always descends.
func threewayDiffDirs(ctx context.Context, baseDir string, dents []*fsmgr.SeafDirent, optData interface{}, recurse *bool) error {
	*recurse = true
	return nil
}
// DiffCommitRoots two-way-diffs two commit roots (old p1Root vs new
// p2Root) and appends the changes, with renames resolved, to results.
func DiffCommitRoots(storeID, p1Root, p2Root string, results *[]*DiffEntry, foldDirDiff bool) error {
	opt := &DiffOptions{
		RepoID: storeID,
		FileCB: twowayDiffFiles,
		DirCB:  twowayDiffDirs,
		Data:   diffData{foldDirDiff, results},
	}
	if err := DiffTrees([]string{p1Root, p2Root}, opt); err != nil {
		return fmt.Errorf("failed to diff trees: %v", err)
	}
	diffResolveRenames(results)
	return nil
}
// DiffCommits two-way-diffs the root trees of two commits from the same
// repo, resolving the repo's store ID first. Renames are resolved in
// the appended results.
func DiffCommits(commit1, commit2 *commitmgr.Commit, results *[]*DiffEntry, foldDirDiff bool) error {
	repo := repomgr.Get(commit1.RepoID)
	if repo == nil {
		return fmt.Errorf("failed to get repo %s", commit1.RepoID)
	}
	opt := &DiffOptions{
		RepoID: repo.StoreID,
		FileCB: twowayDiffFiles,
		DirCB:  twowayDiffDirs,
		Data:   diffData{foldDirDiff, results},
	}
	if err := DiffTrees([]string{commit1.RootID, commit2.RootID}, opt); err != nil {
		return fmt.Errorf("failed to diff trees: %v", err)
	}
	diffResolveRenames(results)
	return nil
}
// twowayDiffFiles classifies a file between the old tree (dents[0]) and
// the new tree (dents[1]): Added, Deleted, or Modified with the original
// size recorded.
func twowayDiffFiles(ctx context.Context, baseDir string, dents []*fsmgr.SeafDirent, optData interface{}) error {
	data, ok := optData.(diffData)
	if !ok {
		return fmt.Errorf("failed to assert diff data")
	}
	old, cur := dents[0], dents[1]
	results := data.results
	switch {
	case old == nil:
		*results = append(*results, diffEntryNewFromDirent(DiffTypeCommits, DiffStatusAdded, cur, baseDir))
	case cur == nil:
		*results = append(*results, diffEntryNewFromDirent(DiffTypeCommits, DiffStatusDeleted, old, baseDir))
	case !direntSame(old, cur):
		entry := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusModified, cur, baseDir)
		entry.OriginSize = old.Size
		*results = append(*results, entry)
	}
	return nil
}
// twowayDiffDirs classifies a directory between the old tree (dents[0])
// and the new tree (dents[1]) and decides whether to descend. Empty or
// folded added directories are reported as a single DirAdded entry
// without recursion; deleted directories are always reported, with
// recursion depending on foldDirDiff.
// NOTE(review): when BOTH trees contain the directory, *recurse is left
// untouched — it relies on diffDirectories initializing it to true.
func twowayDiffDirs(ctx context.Context, baseDir string, dents []*fsmgr.SeafDirent, optData interface{}, recurse *bool) error {
	p1 := dents[0]
	p2 := dents[1]
	data, ok := optData.(diffData)
	if !ok {
		err := fmt.Errorf("failed to assert diff data")
		return err
	}
	results := data.results
	if p1 == nil {
		// Directory only in the new tree: added.
		if p2.ID == EmptySha1 || data.foldDirDiff {
			de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusDirAdded, p2, baseDir)
			*results = append(*results, de)
			*recurse = false
		} else {
			*recurse = true
		}
		return nil
	}
	if p2 == nil {
		// Directory only in the old tree: deleted.
		de := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusDirDeleted, p1, baseDir)
		*results = append(*results, de)
		if data.foldDirDiff {
			*recurse = false
		} else {
			*recurse = true
		}
	}
	return nil
}
// diffResolveRenames post-processes a diff result, pairing Deleted with
// Added entries (and DirDeleted with DirAdded) that share a SHA-1 and
// collapsing each pair into a single Renamed/DirRenamed entry. The
// slice behind des is rewritten in place.
//
// Empty-SHA entries are ambiguous (many files can be empty), so they are
// only considered rename candidates when there is exactly one empty
// deletion and one empty addition of the same kind.
func diffResolveRenames(des *[]*DiffEntry) error {
	var deletedEmptyCount, deletedEmptyDirCount, addedEmptyCount, addedEmptyDirCount int
	// Pass 1: count empty-SHA additions/deletions to decide whether
	// empty entries may participate in rename matching at all.
	for _, de := range *des {
		if de.Sha1 == EmptySha1 {
			if de.Status == DiffStatusDeleted {
				deletedEmptyCount++
			}
			if de.Status == DiffStatusDirDeleted {
				deletedEmptyDirCount++
			}
			if de.Status == DiffStatusAdded {
				addedEmptyCount++
			}
			if de.Status == DiffStatusDirAdded {
				addedEmptyDirCount++
			}
		}
	}
	deletedFiles := make(map[string]*DiffEntry)
	deletedDirs := make(map[string]*DiffEntry)
	var results []*DiffEntry
	var added []*DiffEntry
	checkEmptyDir := (deletedEmptyDirCount == 1 && addedEmptyDirCount == 1)
	checkEmptyFile := (deletedEmptyCount == 1 && addedEmptyCount == 1)
	// Pass 2: index deletions by SHA (first occurrence only — duplicate
	// SHAs go straight to results), collect additions for matching, and
	// pass Modified entries through unchanged.
	for _, de := range *des {
		if de.Status == DiffStatusDeleted {
			if de.Sha1 == EmptySha1 && !checkEmptyFile {
				results = append(results, de)
				continue
			}
			if _, ok := deletedFiles[de.Sha1]; ok {
				results = append(results, de)
				continue
			}
			deletedFiles[de.Sha1] = de
		}
		if de.Status == DiffStatusDirDeleted {
			if de.Sha1 == EmptySha1 && !checkEmptyDir {
				results = append(results, de)
				continue
			}
			if _, ok := deletedDirs[de.Sha1]; ok {
				results = append(results, de)
				continue
			}
			deletedDirs[de.Sha1] = de
		}
		if de.Status == DiffStatusAdded {
			if de.Sha1 == EmptySha1 && !checkEmptyFile {
				results = append(results, de)
				continue
			}
			added = append(added, de)
		}
		if de.Status == DiffStatusDirAdded {
			if de.Sha1 == EmptySha1 && !checkEmptyDir {
				results = append(results, de)
				continue
			}
			added = append(added, de)
		}
		if de.Status == DiffStatusModified {
			results = append(results, de)
		}
	}
	// Pass 3: match each addition to a pending deletion with the same
	// SHA; a match becomes one rename entry (old name -> new name) and
	// consumes the deletion.
	for _, de := range added {
		var deAdd, deDel, deRename *DiffEntry
		var renameStatus rune
		deAdd = de
		if deAdd.Status == DiffStatusAdded {
			deTmp, ok := deletedFiles[de.Sha1]
			if !ok {
				results = append(results, deAdd)
				continue
			}
			deDel = deTmp
		} else {
			deTmp, ok := deletedDirs[de.Sha1]
			if !ok {
				results = append(results, deAdd)
				continue
			}
			deDel = deTmp
		}
		if deAdd.Status == DiffStatusDirAdded {
			renameStatus = DiffStatusDirRenamed
		} else {
			renameStatus = DiffStatusRenamed
		}
		deRename = diffEntryNew(deDel.DiffType, renameStatus, deDel.Sha1, deDel.Name)
		deRename.NewName = de.Name
		results = append(results, deRename)
		if deDel.Status == DiffStatusDirDeleted {
			delete(deletedDirs, deAdd.Sha1)
		} else {
			delete(deletedFiles, deAdd.Sha1)
		}
	}
	// Unmatched deletions are genuine deletions; emit them last.
	for _, de := range deletedFiles {
		results = append(results, de)
	}
	for _, de := range deletedDirs {
		results = append(results, de)
	}
	*des = results
	return nil
}
// DiffResultsToDesc renders a diff result into the human-readable
// commit description used by the web UI ("Added or modified ...",
// "Deleted ...", etc.). Each category names its first entry and, when
// there are more, a "+N more" suffix. Returns "" for a nil result.
func DiffResultsToDesc(results []*DiffEntry) string {
	var nAddMod, nRemoved, nRenamed int
	var nNewDir, nRemovedDir int
	// First file/dir name seen in each category, used in the summary.
	var addModFile, removedFile string
	var renamedFile string
	var newDir, removedDir string
	var desc string
	if results == nil {
		return ""
	}
	// Tally entries per category; Added and Modified share one bucket.
	for _, de := range results {
		switch de.Status {
		case DiffStatusAdded:
			if nAddMod == 0 {
				addModFile = filepath.Base(de.Name)
			}
			nAddMod++
		case DiffStatusDeleted:
			if nRemoved == 0 {
				removedFile = filepath.Base(de.Name)
			}
			nRemoved++
		case DiffStatusRenamed:
			if nRenamed == 0 {
				renamedFile = filepath.Base(de.Name)
			}
			nRenamed++
		case DiffStatusModified:
			if nAddMod == 0 {
				addModFile = filepath.Base(de.Name)
			}
			nAddMod++
		case DiffStatusDirAdded:
			if nNewDir == 0 {
				newDir = filepath.Base(de.Name)
			}
			nNewDir++
		case DiffStatusDirDeleted:
			if nRemovedDir == 0 {
				removedDir = filepath.Base(de.Name)
			}
			nRemovedDir++
		}
	}
	if nAddMod == 1 {
		desc = fmt.Sprintf("Added or modified \"%s\".\n", addModFile)
	} else if nAddMod > 1 {
		desc = fmt.Sprintf("Added or modified \"%s\" and %d more files.\n", addModFile, nAddMod-1)
	}
	if nRemoved == 1 {
		desc += fmt.Sprintf("Deleted \"%s\".\n", removedFile)
	} else if nRemoved > 1 {
		desc += fmt.Sprintf("Deleted \"%s\" and %d more files.\n", removedFile, nRemoved-1)
	}
	if nRenamed == 1 {
		desc += fmt.Sprintf("Renamed \"%s\".\n", renamedFile)
	} else if nRenamed > 1 {
		desc += fmt.Sprintf("Renamed \"%s\" and %d more files.\n", renamedFile, nRenamed-1)
	}
	if nNewDir == 1 {
		desc += fmt.Sprintf("Added directory \"%s\".\n", newDir)
	} else if nNewDir > 1 {
		desc += fmt.Sprintf("Added \"%s\" and %d more directories.\n", newDir, nNewDir-1)
	}
	if nRemovedDir == 1 {
		desc += fmt.Sprintf("Removed directory \"%s\".\n", removedDir)
	} else if nRemovedDir > 1 {
		desc += fmt.Sprintf("Removed \"%s\" and %d more directories.\n", removedDir, nRemovedDir-1)
	}
	return desc
}
================================================
FILE: fileserver/diff/diff_test.go
================================================
package diff
import (
"context"
"fmt"
"os"
"syscall"
"testing"
"github.com/haiwen/seafile-server/fileserver/fsmgr"
)
const (
emptySHA1 = "0000000000000000000000000000000000000000"
diffTestSeafileConfPath = "/tmp/conf"
diffTestSeafileDataDir = "/tmp/conf/seafile-data"
diffTestRepoID = "0d18a711-c988-4f7b-960c-211b34705ce3"
)
var diffTestTree1 string
var diffTestTree2 string
var diffTestTree3 string
var diffTestTree4 string
var diffTestFileID string
var diffTestDirID1 string
var diffTestDirID2 string
/*
test directory structure:
tree1
|--
tree2
|--file
tree3
|--dir
tree4
|--dir
|-- file
*/
// TestDiffTrees builds the four fixture trees (see the comment above)
// and runs the pairwise diff subtests, cleaning up afterwards. Subtests
// share the package-level tree/ID globals set by diffTestCreateTestDir.
func TestDiffTrees(t *testing.T) {
	fsmgr.Init(diffTestSeafileConfPath, diffTestSeafileDataDir, 2<<30)
	err := diffTestCreateTestDir()
	if err != nil {
		fmt.Printf("failed to create test dir: %v", err)
		os.Exit(1)
	}
	t.Run("test1", testDiffTrees1)
	t.Run("test2", testDiffTrees2)
	t.Run("test3", testDiffTrees3)
	t.Run("test4", testDiffTrees4)
	t.Run("test5", testDiffTrees5)
	err = diffTestDelFile()
	if err != nil {
		fmt.Printf("failed to remove test file : %v", err)
	}
}
// diffTestCreateTestDir materializes the fixture trees described in the
// comment above TestDiffTrees: tree1 is empty, tree2 holds one file,
// tree3 holds an empty dir, tree4 holds a dir containing that file.
// Object IDs are stashed in the package-level diffTest* globals.
func diffTestCreateTestDir() error {
	modeDir := uint32(syscall.S_IFDIR | 0644)
	modeFile := uint32(syscall.S_IFREG | 0644)
	// tree1: an empty directory.
	dir1, err := diffTestCreateSeafdir(nil)
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	diffTestTree1 = dir1
	file1, err := fsmgr.NewSeafile(1, 1, nil)
	if err != nil {
		err := fmt.Errorf("failed to new seafile: %v", err)
		return err
	}
	diffTestFileID = file1.FileID
	err = fsmgr.SaveSeafile(diffTestRepoID, file1)
	if err != nil {
		err := fmt.Errorf("failed to save seafile: %v", err)
		return err
	}
	// tree2: contains one regular file named "file".
	dent1 := fsmgr.SeafDirent{ID: file1.FileID, Name: "file", Mode: modeFile, Size: 1}
	dir2, err := diffTestCreateSeafdir([]*fsmgr.SeafDirent{&dent1})
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	diffTestTree2 = dir2
	// tree3: contains an empty subdirectory "dir" (reuses dir1's ID).
	dent2 := fsmgr.SeafDirent{ID: dir1, Name: "dir", Mode: modeDir}
	diffTestDirID1 = dir1
	dir3, err := diffTestCreateSeafdir([]*fsmgr.SeafDirent{&dent2})
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	diffTestTree3 = dir3
	// tree4: contains subdirectory "dir" holding the file (reuses dir2).
	dent3 := fsmgr.SeafDirent{ID: dir2, Name: "dir", Mode: modeDir}
	diffTestDirID2 = dir2
	dir4, err := diffTestCreateSeafdir([]*fsmgr.SeafDirent{&dent3})
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	diffTestTree4 = dir4
	return nil
}
// testDiffTrees1 diffs tree2 (one file) against tree1 (empty) and
// expects exactly the file's ID to be reported.
func testDiffTrees1(t *testing.T) {
	var results []interface{}
	opt := &DiffOptions{
		FileCB: diffTestFileCB,
		DirCB:  diffTestDirCB,
		RepoID: diffTestRepoID}
	opt.Data = &results
	DiffTrees([]string{diffTestTree2, diffTestTree1}, opt)
	if len(results) != 1 {
		t.Errorf("data length is %d not 1", len(results))
	}
	collected := make([]string, len(results))
	for i, item := range results {
		collected[i] = fmt.Sprintf("%s", item)
	}
	if collected[0] != diffTestFileID {
		t.Errorf("result %s != %s", collected[0], diffTestFileID)
	}
}
// testDiffTrees2 diffs tree3 (empty dir) against tree1 (empty) and
// expects exactly the directory's ID to be reported.
func testDiffTrees2(t *testing.T) {
	var results []interface{}
	opt := &DiffOptions{
		FileCB: diffTestFileCB,
		DirCB:  diffTestDirCB,
		RepoID: diffTestRepoID}
	opt.Data = &results
	DiffTrees([]string{diffTestTree3, diffTestTree1}, opt)
	if len(results) != 1 {
		t.Errorf("data length is %d not 1", len(results))
	}
	var ret = make([]string, len(results))
	for k, v := range results {
		ret[k] = fmt.Sprintf("%s", v)
	}
	if ret[0] != diffTestDirID1 {
		t.Errorf("result %s != %s", ret[0], diffTestDirID1)
	}
}
// testDiffTrees3 diffs tree4 (dir containing a file) against tree1
// (empty) and expects both the directory's and the file's IDs.
func testDiffTrees3(t *testing.T) {
	var results []interface{}
	opt := &DiffOptions{
		FileCB: diffTestFileCB,
		DirCB:  diffTestDirCB,
		RepoID: diffTestRepoID}
	opt.Data = &results
	DiffTrees([]string{diffTestTree4, diffTestTree1}, opt)
	if len(results) != 2 {
		// Message fixed: the expectation here is 2, not 1.
		t.Errorf("data length is %d not 2", len(results))
	}
	var ret = make([]string, len(results))
	for k, v := range results {
		ret[k] = fmt.Sprintf("%s", v)
	}
	if ret[0] != diffTestDirID2 {
		t.Errorf("result %s != %s", ret[0], diffTestDirID2)
	}
	if ret[1] != diffTestFileID {
		t.Errorf("result %s != %s", ret[1], diffTestFileID)
	}
}
// testDiffTrees4 diffs tree4 (dir with file) against tree3 (empty dir)
// and expects the changed directory ID plus the new file ID.
func testDiffTrees4(t *testing.T) {
	var results []interface{}
	opt := &DiffOptions{
		FileCB: diffTestFileCB,
		DirCB:  diffTestDirCB,
		RepoID: diffTestRepoID}
	opt.Data = &results
	DiffTrees([]string{diffTestTree4, diffTestTree3}, opt)
	if len(results) != 2 {
		// Message fixed: the expectation here is 2, not 1.
		t.Errorf("data length is %d not 2", len(results))
	}
	var ret = make([]string, len(results))
	for k, v := range results {
		ret[k] = fmt.Sprintf("%s", v)
	}
	if ret[0] != diffTestDirID2 {
		t.Errorf("result %s != %s", ret[0], diffTestDirID2)
	}
	if ret[1] != diffTestFileID {
		t.Errorf("result %s != %s", ret[1], diffTestFileID)
	}
}
// testDiffTrees5 diffs tree3 (empty dir named "dir") against tree2 (one
// file named "file") and expects exactly the directory's ID.
func testDiffTrees5(t *testing.T) {
	var results []interface{}
	opt := &DiffOptions{
		FileCB: diffTestFileCB,
		DirCB:  diffTestDirCB,
		RepoID: diffTestRepoID}
	opt.Data = &results
	DiffTrees([]string{diffTestTree3, diffTestTree2}, opt)
	if len(results) != 1 {
		t.Errorf("data length is %d not 1", len(results))
	}
	var ret = make([]string, len(results))
	for k, v := range results {
		ret[k] = fmt.Sprintf("%s", v)
	}
	if ret[0] != diffTestDirID1 {
		t.Errorf("result %s != %s", ret[0], diffTestDirID1)
	}
}
// diffTestCreateSeafdir creates and persists a seafdir holding the given
// dirents (nil for an empty directory) and returns its object ID.
func diffTestCreateSeafdir(dents []*fsmgr.SeafDirent) (string, error) {
	seafdir, err := fsmgr.NewSeafdir(1, dents)
	if err != nil {
		return "", err
	}
	if err := fsmgr.SaveSeafdir(diffTestRepoID, seafdir); err != nil {
		return "", err
	}
	return seafdir.DirID, nil
}
// diffTestDelFile removes the temporary seafile data directory used by
// the diff tests.
func diffTestDelFile() error {
	return os.RemoveAll(diffTestSeafileConfPath)
}
// diffTestFileCB records the ID of a file that exists in the first tree
// but not (or with different content) in the second.
func diffTestFileCB(ctx context.Context, baseDir string, files []*fsmgr.SeafDirent, data interface{}) error {
	results, ok := data.(*[]interface{})
	if !ok {
		return fmt.Errorf("failed to assert results")
	}
	first, second := files[0], files[1]
	if first == nil {
		return nil
	}
	if second == nil || first.ID != second.ID {
		*results = append(*results, first.ID)
	}
	return nil
}
// diffTestDirCB records the ID of a dir that is new or modified between
// the two diffed trees into the *[]interface{} passed via data.
func diffTestDirCB(ctx context.Context, baseDir string, dirs []*fsmgr.SeafDirent, data interface{}, recurse *bool) error {
	out, ok := data.(*[]interface{})
	if !ok {
		return fmt.Errorf("failed to assert results")
	}
	newEnt, oldEnt := dirs[0], dirs[1]
	if newEnt == nil {
		return nil
	}
	// Record when the dir is newly added or its content ID changed.
	if oldEnt == nil || newEnt.ID != oldEnt.ID {
		*out = append(*out, newEnt.ID)
	}
	return nil
}
================================================
FILE: fileserver/fileop.go
================================================
package main
import (
"archive/zip"
"bytes"
"context"
"crypto/sha1"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"mime"
"mime/multipart"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"runtime/debug"
"strconv"
"strings"
"sync"
"time"
"unicode/utf8"
"database/sql"
"math/rand"
"sort"
"syscall"
"github.com/gorilla/mux"
"github.com/haiwen/seafile-server/fileserver/blockmgr"
"github.com/haiwen/seafile-server/fileserver/commitmgr"
"github.com/haiwen/seafile-server/fileserver/diff"
"github.com/haiwen/seafile-server/fileserver/fsmgr"
"github.com/haiwen/seafile-server/fileserver/option"
"github.com/haiwen/seafile-server/fileserver/repomgr"
"github.com/haiwen/seafile-server/fileserver/utils"
"github.com/haiwen/seafile-server/fileserver/workerpool"
log "github.com/sirupsen/logrus"
"golang.org/x/text/unicode/norm"
)
const (
	// Files larger than this threshold (8 MiB) get their per-block size
	// list cached in blockMapCacheTable to avoid re-stat'ing every block
	// on each range request.
	cacheBlockMapThreshold = 1 << 23
	// Cached block maps expire after one day (seconds).
	blockMapCacheExpiretime int64 = 3600 * 24
	// Period of the background cache-cleanup ticker (seconds).
	fileopCleaningIntervalSec = 3600
	// NOTE(review): not referenced in this chunk; presumably caps the
	// "name (N)" duplicate-name loop elsewhere — confirm against callers.
	duplicateNamesCount = 1000
)
var (
	// blockMapCacheTable maps a file ID to its *blockMap (block sizes +
	// expiry) for large files.
	blockMapCacheTable sync.Map
	// indexFilePool bounds the number of concurrent file-indexing workers.
	indexFilePool *workerpool.WorkPool
)
// Dirents is an alias for slice of SeafDirent.
type Dirents []*fsmgr.SeafDirent

// Less reports whether entry i sorts before entry j. The comparison is
// inverted, so sort.Sort yields descending name order.
func (d Dirents) Less(i, j int) bool {
	return d[j].Name < d[i].Name
}

// Swap exchanges the entries at i and j.
func (d Dirents) Swap(i, j int) {
	d[i], d[j] = d[j], d[i]
}

// Len returns the number of entries.
func (d Dirents) Len() int {
	return len(d)
}
// fileopInit starts the periodic cleanup of expired fileop caches and
// creates the bounded worker pool used for file indexing.
func fileopInit() {
	cleanTicker := time.NewTicker(time.Second * fileopCleaningIntervalSec)
	go RecoverWrapper(func() {
		for {
			<-cleanTicker.C
			removeFileopExpireCache()
		}
	})
	indexFilePool = workerpool.CreateWorkerPool(indexFileWorker, int(option.MaxIndexingFiles))
}
// initUpload ensures the shared temp directory used for chunked/cluster
// uploads exists. The MkdirAll error was previously ignored silently; it
// is now logged so a misconfigured data dir is visible at startup.
func initUpload() {
	objDir := filepath.Join(dataDir, "httptemp", "cluster-shared")
	if err := os.MkdirAll(objDir, os.ModePerm); err != nil {
		log.Errorf("failed to create upload temp directory %s: %v", objDir, err)
	}
}
// parseContentType maps a file name's extension to a MIME content type.
// It returns "" when the name has no extension or the extension is not in
// the table; callers then fall back to "application/octet-stream".
func parseContentType(fileName string) string {
	var contentType string
	parts := strings.Split(fileName, ".")
	if len(parts) >= 2 {
		// Matching is case-insensitive: the suffix is lower-cased once
		// here. (The previous upper-case cases such as "JPEG"/"PNG" were
		// unreachable after ToLower and have been removed.)
		suffix := strings.ToLower(parts[len(parts)-1])
		switch suffix {
		case "txt":
			contentType = "text/plain"
		case "doc":
			contentType = "application/vnd.ms-word"
		case "docx":
			contentType = "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
		case "ppt":
			contentType = "application/vnd.ms-powerpoint"
		case "xls":
			contentType = "application/vnd.ms-excel"
		case "xlsx":
			contentType = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
		case "pdf":
			contentType = "application/pdf"
		case "zip":
			contentType = "application/zip"
		case "mp3":
			contentType = "audio/mp3"
		case "mpeg":
			contentType = "video/mpeg"
		case "mp4", "mov":
			// .mov is deliberately served as video/mp4 (same as before).
			contentType = "video/mp4"
		case "ogv":
			contentType = "video/ogg"
		case "webm":
			contentType = "video/webm"
		case "mkv":
			contentType = "video/x-matroska"
		case "jpeg", "jpg", "jfif":
			contentType = "image/jpeg"
		case "png":
			contentType = "image/png"
		case "gif":
			contentType = "image/gif"
		case "svg":
			contentType = "image/svg+xml"
		case "heic":
			contentType = "image/heic"
		case "ico":
			contentType = "image/x-icon"
		case "bmp":
			contentType = "image/bmp"
		case "tif", "tiff":
			contentType = "image/tiff"
		case "psd":
			contentType = "image/vnd.adobe.photoshop"
		case "webp":
			contentType = "image/webp"
		}
	}
	return contentType
}
// accessCB serves "/files/<token>/<filename>": view or download a single
// file through a previously issued web-access token.
func accessCB(rsp http.ResponseWriter, r *http.Request) *appError {
	parts := strings.Split(r.URL.Path[1:], "/")
	if len(parts) < 3 {
		msg := "Invalid URL"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	token := parts[1]
	fileName := parts[2]
	// Resolve the token into repo ID / operation / user / object ID.
	accessInfo, err := parseWebaccessInfo(token)
	if err != nil {
		return err
	}
	repoID := accessInfo.repoID
	op := accessInfo.op
	user := accessInfo.user
	objID := accessInfo.objID
	if op != "view" && op != "download" && op != "download-link" {
		msg := "Operation does not match access token."
		return &appError{nil, msg, http.StatusForbidden}
	}
	// File content is addressed by object ID and thus immutable, so any
	// conditional request can be answered 304 without further checks.
	if _, ok := r.Header["If-Modified-Since"]; ok {
		return &appError{nil, "", http.StatusNotModified}
	}
	now := time.Now()
	rsp.Header().Set("ETag", objID)
	rsp.Header().Set("Last-Modified", now.Format("Mon, 2 Jan 2006 15:04:05 GMT"))
	rsp.Header().Set("Cache-Control", "max-age=3600")
	ranges := r.Header["Range"]
	byteRanges := strings.Join(ranges, "")
	repo := repomgr.Get(repoID)
	if repo == nil {
		msg := "Bad repo id"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	// Encrypted repos require the per-repo decryption key before serving.
	var cryptKey *seafileCrypt
	if repo.IsEncrypted {
		key, err := parseCryptKey(rsp, repoID, user, repo.EncVersion)
		if err != nil {
			return err
		}
		cryptKey = key
	}
	exists, _ := fsmgr.Exists(repo.StoreID, objID)
	if !exists {
		msg := "Invalid file id"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	// Range requests are only honored for unencrypted repos; encrypted
	// content is decrypted and streamed block by block from the start.
	if !repo.IsEncrypted && len(byteRanges) != 0 {
		if err := doFileRange(rsp, r, repo, objID, fileName, op, byteRanges, user); err != nil {
			return err
		}
	} else if err := doFile(rsp, r, repo, objID, fileName, op, cryptKey, user); err != nil {
		return err
	}
	return nil
}
// parseCryptKey fetches the decryption key and IV for an encrypted repo
// via RPC and decodes them from hex into a seafileCrypt. A failed RPC call
// is reported as a user-facing 400 (typically the user has not supplied
// the repo password yet); malformed key material is a 500.
func parseCryptKey(rsp http.ResponseWriter, repoID string, user string, version int) (*seafileCrypt, *appError) {
	key, err := rpcclient.Call("seafile_get_decrypt_key", repoID, user)
	if err != nil {
		errMessage := "Repo is encrypted. Please provide password to view it."
		return nil, &appError{nil, errMessage, http.StatusBadRequest}
	}
	cryptKey, ok := key.(map[string]interface{})
	if !ok {
		err := fmt.Errorf("failed to assert crypt key")
		return nil, &appError{err, "", http.StatusInternalServerError}
	}
	seafileKey := new(seafileCrypt)
	seafileKey.version = version
	if cryptKey != nil {
		// Both key and IV arrive hex-encoded in the RPC reply map.
		key, ok := cryptKey["key"].(string)
		if !ok {
			err := fmt.Errorf("failed to parse crypt key")
			return nil, &appError{err, "", http.StatusInternalServerError}
		}
		iv, ok := cryptKey["iv"].(string)
		if !ok {
			err := fmt.Errorf("failed to parse crypt iv")
			return nil, &appError{err, "", http.StatusInternalServerError}
		}
		seafileKey.key, err = hex.DecodeString(key)
		if err != nil {
			err := fmt.Errorf("failed to decode key: %v", err)
			return nil, &appError{err, "", http.StatusInternalServerError}
		}
		seafileKey.iv, err = hex.DecodeString(iv)
		if err != nil {
			err := fmt.Errorf("failed to decode iv: %v", err)
			return nil, &appError{err, "", http.StatusInternalServerError}
		}
	}
	return seafileKey, nil
}
// accessV2CB serves the v2 file-access endpoint: the file is addressed by
// repo ID + path, and authorization is delegated to Seahub via
// checkFileAccess using the caller's token or cookie.
func accessV2CB(rsp http.ResponseWriter, r *http.Request) *appError {
	vars := mux.Vars(r)
	repoID := vars["repoid"]
	filePath := vars["filepath"]
	if filePath == "" {
		msg := "No file path\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	rpath := getCanonPath(filePath)
	fileName := filepath.Base(rpath)
	op := r.URL.Query().Get("op")
	if op != "view" && op != "download" {
		msg := "Operation is neither view or download\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	// At least one credential (API token or session cookie) is required.
	token := utils.GetAuthorizationToken(r.Header)
	cookie := r.Header.Get("Cookie")
	if token == "" && cookie == "" {
		msg := "Both token and cookie are not set\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	ipAddr := getClientIPAddr(r)
	userAgent := r.Header.Get("User-Agent")
	// NOTE(review): access is always checked with op "download" even for
	// op == "view" — confirm this is intended.
	user, appErr := checkFileAccess(repoID, token, cookie, filePath, "download", ipAddr, userAgent)
	if appErr != nil {
		return appErr
	}
	repo := repomgr.Get(repoID)
	if repo == nil {
		msg := "Bad repo id"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	// Resolve the path to a file object ID in the repo's current root.
	fileID, _, err := fsmgr.GetObjIDByPath(repo.StoreID, repo.RootID, rpath)
	if err != nil {
		msg := "Invalid file_path\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	// The object ID doubles as a strong ETag: unchanged ID means 304.
	etag := r.Header.Get("If-None-Match")
	if etag == fileID {
		return &appError{nil, "", http.StatusNotModified}
	}
	rsp.Header().Set("ETag", fileID)
	rsp.Header().Set("Cache-Control", "private, no-cache")
	ranges := r.Header["Range"]
	byteRanges := strings.Join(ranges, "")
	var cryptKey *seafileCrypt
	if repo.IsEncrypted {
		key, err := parseCryptKey(rsp, repoID, user, repo.EncVersion)
		if err != nil {
			return err
		}
		cryptKey = key
	}
	exists, _ := fsmgr.Exists(repo.StoreID, fileID)
	if !exists {
		msg := "Invalid file id"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	// Range requests are only honored for unencrypted repos.
	if !repo.IsEncrypted && len(byteRanges) != 0 {
		if err := doFileRange(rsp, r, repo, fileID, fileName, op, byteRanges, user); err != nil {
			return err
		}
	} else if err := doFile(rsp, r, repo, fileID, fileName, op, cryptKey, user); err != nil {
		return err
	}
	return nil
}
// UserInfo is the JSON response body of Seahub's check-access endpoint; it
// carries the name of the authenticated user.
type UserInfo struct {
	User string `json:"user"`
}
// checkFileAccess asks Seahub whether the caller (identified by token
// and/or cookie) may perform op on filePath in repoID. The fileserver
// authenticates itself to Seahub with a signed JWT. On success it returns
// the resolved user name; on denial the Seahub response body and status
// are passed through to the client.
func checkFileAccess(repoID, token, cookie, filePath, op, ipAddr, userAgent string) (string, *appError) {
	tokenString, err := utils.GenSeahubJWTToken()
	if err != nil {
		err := fmt.Errorf("failed to sign jwt token: %v", err)
		return "", &appError{err, "", http.StatusInternalServerError}
	}
	url := fmt.Sprintf("%s/repos/%s/check-access/", option.SeahubURL, repoID)
	header := map[string][]string{
		"Authorization": {"Token " + tokenString},
	}
	// Forward the user's session cookie so Seahub can authenticate it.
	if cookie != "" {
		header["Cookie"] = []string{cookie}
	}
	req := make(map[string]string)
	req["op"] = op
	req["path"] = filePath
	if token != "" {
		req["token"] = token
	}
	if ipAddr != "" {
		req["ip_addr"] = ipAddr
	}
	if userAgent != "" {
		req["user_agent"] = userAgent
	}
	msg, err := json.Marshal(req)
	if err != nil {
		err := fmt.Errorf("failed to encode access token: %v", err)
		return "", &appError{err, "", http.StatusInternalServerError}
	}
	status, body, err := utils.HttpCommon("POST", url, header, bytes.NewReader(msg))
	if err != nil {
		// Non-500 statuses carry a Seahub-provided denial message that is
		// relayed to the client as-is; 500 is treated as an internal error.
		if status != http.StatusInternalServerError {
			return "", &appError{nil, string(body), status}
		} else {
			err := fmt.Errorf("failed to get access token info: %v", err)
			return "", &appError{err, "", http.StatusInternalServerError}
		}
	}
	info := new(UserInfo)
	err = json.Unmarshal(body, &info)
	if err != nil {
		err := fmt.Errorf("failed to decode access token info: %v", err)
		return "", &appError{err, "", http.StatusInternalServerError}
	}
	return info.User, nil
}
// doFile streams a whole file to the client, decrypting block by block
// when cryptKey is non-nil, and emits a download-statistics message on
// completion. Write errors to the client are treated as a disconnect and
// silently end the response.
func doFile(rsp http.ResponseWriter, r *http.Request, repo *repomgr.Repo, fileID string,
	fileName string, operation string, cryptKey *seafileCrypt, user string) *appError {
	file, err := fsmgr.GetSeafile(repo.StoreID, fileID)
	if err != nil {
		msg := "Failed to get seafile"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	rsp.Header().Set("Access-Control-Allow-Origin", "*")
	// Sandbox SVG content so embedded scripts cannot run in our origin.
	fileType := parseContentType(fileName)
	if fileType == "image/svg+xml" {
		rsp.Header().Set("Content-Security-Policy", "sandbox")
	}
	setCommonHeaders(rsp, r, operation, fileName)
	//filesize string
	fileSize := fmt.Sprintf("%d", file.FileSize)
	rsp.Header().Set("Content-Length", fileSize)
	if r.Method == "HEAD" {
		rsp.WriteHeader(http.StatusOK)
		return nil
	}
	if file.FileSize == 0 {
		rsp.WriteHeader(http.StatusOK)
		return nil
	}
	if cryptKey != nil {
		// Encrypted path: each block is read fully, decrypted, then sent.
		for _, blkID := range file.BlkIDs {
			var buf bytes.Buffer
			// NOTE(review): the Read error is ignored here; a failed read
			// yields an empty buffer passed to decrypt — confirm intended.
			blockmgr.Read(repo.StoreID, blkID, &buf)
			decoded, err := cryptKey.decrypt(buf.Bytes())
			if err != nil {
				err := fmt.Errorf("failed to decrypt block %s: %v", blkID, err)
				return &appError{err, "", http.StatusInternalServerError}
			}
			_, err = rsp.Write(decoded)
			if err != nil {
				// Client went away; nothing more to send.
				return nil
			}
		}
		return nil
	}
	// Unencrypted path: stream each block directly into the response.
	for _, blkID := range file.BlkIDs {
		err := blockmgr.Read(repo.StoreID, blkID, rsp)
		if err != nil {
			if !isNetworkErr(err) {
				log.Errorf("failed to read block %s: %v", blkID, err)
			}
			return nil
		}
	}
	oper := "web-file-download"
	if operation == "download-link" {
		oper = "link-file-download"
	}
	sendStatisticMsg(repo.StoreID, user, oper, file.FileSize)
	return nil
}
func isNetworkErr(err error) bool {
_, ok := err.(net.Error)
return ok
}
// blockMap caches the per-block sizes of a large file so repeated range
// requests do not re-stat every block; expireTime is a unix timestamp
// after which the entry may be evicted.
type blockMap struct {
	blkSize    []uint64
	expireTime int64
}
// doFileRange serves a single HTTP byte range of an unencrypted file as a
// 206 Partial Content response. It first builds (or loads from cache) the
// list of block sizes, locates the block containing the range start, then
// streams from there, trimming the first and last blocks as needed.
func doFileRange(rsp http.ResponseWriter, r *http.Request, repo *repomgr.Repo, fileID string,
	fileName string, operation string, byteRanges string, user string) *appError {
	file, err := fsmgr.GetSeafile(repo.StoreID, fileID)
	if err != nil {
		msg := "Failed to get seafile"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	if file.FileSize == 0 {
		rsp.WriteHeader(http.StatusOK)
		return nil
	}
	start, end, ok := parseRange(byteRanges, file.FileSize)
	if !ok {
		// Unsatisfiable range: answer 416 with the full length per RFC 7233.
		conRange := fmt.Sprintf("bytes */%d", file.FileSize)
		rsp.Header().Set("Content-Range", conRange)
		return &appError{nil, "", http.StatusRequestedRangeNotSatisfiable}
	}
	rsp.Header().Set("Accept-Ranges", "bytes")
	// Sandbox SVG content so embedded scripts cannot run in our origin.
	fileType := parseContentType(fileName)
	if fileType == "image/svg+xml" {
		rsp.Header().Set("Content-Security-Policy", "sandbox")
	}
	setCommonHeaders(rsp, r, operation, fileName)
	//filesize string
	conLen := fmt.Sprintf("%d", end-start+1)
	rsp.Header().Set("Content-Length", conLen)
	conRange := fmt.Sprintf("bytes %d-%d/%d", start, end, file.FileSize)
	rsp.Header().Set("Content-Range", conRange)
	rsp.WriteHeader(http.StatusPartialContent)
	// Collect the size of every block; for large files the list is cached
	// keyed by file ID (content-addressed, so sizes never change).
	var blkSize []uint64
	if file.FileSize > cacheBlockMapThreshold {
		if v, ok := blockMapCacheTable.Load(file.FileID); ok {
			if blkMap, ok := v.(*blockMap); ok {
				blkSize = blkMap.blkSize
			}
		}
		if len(blkSize) == 0 {
			for _, v := range file.BlkIDs {
				size, err := blockmgr.Stat(repo.StoreID, v)
				if err != nil {
					err := fmt.Errorf("failed to stat block %s : %v", v, err)
					return &appError{err, "", http.StatusInternalServerError}
				}
				blkSize = append(blkSize, uint64(size))
			}
			blockMapCacheTable.Store(file.FileID, &blockMap{blkSize, time.Now().Unix() + blockMapCacheExpiretime})
		}
	} else {
		for _, v := range file.BlkIDs {
			size, err := blockmgr.Stat(repo.StoreID, v)
			if err != nil {
				err := fmt.Errorf("failed to stat block %s : %v", v, err)
				return &appError{err, "", http.StatusInternalServerError}
			}
			blkSize = append(blkSize, uint64(size))
		}
	}
	// Walk the block sizes to find the block containing offset `start`;
	// `pos` ends up as the offset of `start` within that block.
	var off uint64
	var pos uint64
	var startBlock int
	for i, v := range blkSize {
		pos = start - off
		off += v
		if off > start {
			startBlock = i
			break
		}
	}
	// Read block from the start block and specified position
	var i int
	for ; i < len(file.BlkIDs); i++ {
		if i < startBlock {
			continue
		}
		blkID := file.BlkIDs[i]
		var buf bytes.Buffer
		if end-start+1 <= blkSize[i]-pos {
			// The whole remaining range fits inside this block.
			err := blockmgr.Read(repo.StoreID, blkID, &buf)
			if err != nil {
				if !isNetworkErr(err) {
					log.Errorf("failed to read block %s: %v", blkID, err)
				}
				return nil
			}
			recvBuf := buf.Bytes()
			rsp.Write(recvBuf[pos : pos+end-start+1])
			return nil
		}
		// Send the tail of the first block, then fall through to the
		// whole-block loop below.
		err := blockmgr.Read(repo.StoreID, blkID, &buf)
		if err != nil {
			if !isNetworkErr(err) {
				log.Errorf("failed to read block %s: %v", blkID, err)
			}
			return nil
		}
		recvBuf := buf.Bytes()
		_, err = rsp.Write(recvBuf[pos:])
		if err != nil {
			return nil
		}
		start += blkSize[i] - pos
		i++
		break
	}
	// Always read block from the remaining block and pos=0
	for ; i < len(file.BlkIDs); i++ {
		blkID := file.BlkIDs[i]
		var buf bytes.Buffer
		if end-start+1 <= blkSize[i] {
			// Final (possibly partial) block of the range.
			err := blockmgr.Read(repo.StoreID, blkID, &buf)
			if err != nil {
				if !isNetworkErr(err) {
					log.Errorf("failed to read block %s: %v", blkID, err)
				}
				return nil
			}
			recvBuf := buf.Bytes()
			_, err = rsp.Write(recvBuf[:end-start+1])
			if err != nil {
				return nil
			}
			break
		} else {
			// Middle block: stream it whole.
			err := blockmgr.Read(repo.StoreID, blkID, rsp)
			if err != nil {
				if !isNetworkErr(err) {
					log.Errorf("failed to read block %s: %v", blkID, err)
				}
				return nil
			}
			start += blkSize[i]
		}
	}
	oper := "web-file-download"
	if operation == "download-link" {
		oper = "link-file-download"
	}
	// NOTE(review): `start` has been advanced by the loops above, so
	// end-start+1 here is not the originally requested range length —
	// the reported download size may be smaller than what was sent.
	// Confirm whether this is intended before changing it.
	sendStatisticMsg(repo.StoreID, user, oper, end-start+1)
	return nil
}
// parseRange parses a single HTTP Range header value ("bytes=start-end",
// "bytes=start-" or "bytes=-suffix") against a file of fileSize bytes.
// It returns the inclusive start and end offsets and whether the range is
// satisfiable. fileSize must be > 0 (callers check this).
func parseRange(byteRanges string, fileSize uint64) (uint64, uint64, bool) {
	start := strings.Index(byteRanges, "=")
	end := strings.Index(byteRanges, "-")
	if end < 0 {
		return 0, 0, false
	}
	var startByte, endByte uint64
	if start+1 == end {
		// Suffix form "bytes=-N": the last N bytes of the file.
		retByte, err := strconv.ParseUint(byteRanges[end+1:], 10, 64)
		if err != nil || retByte == 0 {
			return 0, 0, false
		}
		// RFC 7233 §2.1: a suffix length larger than the file selects the
		// entire file. (Previously fileSize-retByte underflowed uint64 and
		// the request was wrongly rejected as unsatisfiable.)
		if retByte > fileSize {
			retByte = fileSize
		}
		startByte = fileSize - retByte
		endByte = fileSize - 1
	} else if end+1 == len(byteRanges) {
		// Open-ended form "bytes=N-": from N to the last byte.
		firstByte, err := strconv.ParseUint(byteRanges[start+1:end], 10, 64)
		if err != nil {
			return 0, 0, false
		}
		startByte = firstByte
		endByte = fileSize - 1
	} else {
		// Closed form "bytes=N-M"; M is clamped to the last byte.
		firstByte, err := strconv.ParseUint(byteRanges[start+1:end], 10, 64)
		if err != nil {
			return 0, 0, false
		}
		lastByte, err := strconv.ParseUint(byteRanges[end+1:], 10, 64)
		if err != nil {
			return 0, 0, false
		}
		if lastByte > fileSize-1 {
			lastByte = fileSize - 1
		}
		startByte = firstByte
		endByte = lastByte
	}
	if startByte > endByte {
		return 0, 0, false
	}
	return startByte, endByte, true
}
// setCommonHeaders sets Content-Type, Content-Disposition and sniffing
// headers shared by the file-serving handlers. Download-style operations
// get an attachment disposition, everything else is inline.
func setCommonHeaders(rsp http.ResponseWriter, r *http.Request, operation, fileName string) {
	mimeType := parseContentType(fileName)
	switch {
	case mimeType == "":
		rsp.Header().Set("Content-Type", "application/octet-stream")
	case strings.Contains(mimeType, "text"):
		rsp.Header().Set("Content-Type", mimeType+"; "+"charset=gbk")
	default:
		rsp.Header().Set("Content-Type", mimeType)
	}
	// The encoded filename* form keeps Safari from garbling non-ASCII
	// names; the plain filename is kept as a fallback for other clients.
	disposition := "inline"
	if operation == "download" || operation == "download-link" ||
		operation == "downloadblks" {
		disposition = "attachment"
	}
	rsp.Header().Set("Content-Disposition",
		fmt.Sprintf("%s;filename*=utf-8''%s;filename=\"%s\"", disposition, url.PathEscape(fileName), fileName))
	if mimeType != "image/jpg" {
		rsp.Header().Set("X-Content-Type-Options", "nosniff")
	}
}
// accessBlksCB serves "/blks/<token>/<blockid>": download one raw block of
// a file through a "downloadblks" web-access token.
func accessBlksCB(rsp http.ResponseWriter, r *http.Request) *appError {
	parts := strings.Split(r.URL.Path[1:], "/")
	if len(parts) < 3 {
		msg := "Invalid URL"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	token := parts[1]
	blkID := parts[2]
	accessInfo, err := parseWebaccessInfo(token)
	if err != nil {
		return err
	}
	repoID := accessInfo.repoID
	op := accessInfo.op
	user := accessInfo.user
	id := accessInfo.objID
	// Blocks are content-addressed, hence immutable: any conditional
	// request can be answered 304 directly.
	if _, ok := r.Header["If-Modified-Since"]; ok {
		return &appError{nil, "", http.StatusNotModified}
	}
	now := time.Now()
	rsp.Header().Set("Last-Modified", now.Format("Mon, 2 Jan 2006 15:04:05 GMT"))
	rsp.Header().Set("Cache-Control", "max-age=3600")
	repo := repomgr.Get(repoID)
	if repo == nil {
		msg := "Bad repo id"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	// id is the file object the token was issued for; the block must
	// belong to it (verified in doBlock).
	exists, _ := fsmgr.Exists(repo.StoreID, id)
	if !exists {
		msg := "Invalid file id"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	if op != "downloadblks" {
		msg := "Operation does not match access token"
		return &appError{nil, msg, http.StatusForbidden}
	}
	if err := doBlock(rsp, r, repo, id, user, blkID); err != nil {
		return err
	}
	return nil
}
// doBlock streams one block of the given file to the client. The block ID
// must be listed in the file's block list (prevents fetching arbitrary
// blocks with a file-scoped token) and must exist in the block store.
func doBlock(rsp http.ResponseWriter, r *http.Request, repo *repomgr.Repo, fileID string,
	user string, blkID string) *appError {
	file, err := fsmgr.GetSeafile(repo.StoreID, fileID)
	if err != nil {
		msg := "Failed to get seafile"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	// Reject block IDs that do not belong to the token's file.
	var found bool
	for _, id := range file.BlkIDs {
		if id == blkID {
			found = true
			break
		}
	}
	if !found {
		rsp.WriteHeader(http.StatusBadRequest)
		return nil
	}
	exists := blockmgr.Exists(repo.StoreID, blkID)
	if !exists {
		rsp.WriteHeader(http.StatusBadRequest)
		return nil
	}
	rsp.Header().Set("Access-Control-Allow-Origin", "*")
	setCommonHeaders(rsp, r, "downloadblks", blkID)
	size, err := blockmgr.Stat(repo.StoreID, blkID)
	if err != nil {
		msg := "Failed to stat block"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	if size == 0 {
		rsp.WriteHeader(http.StatusOK)
		return nil
	}
	fileSize := fmt.Sprintf("%d", size)
	rsp.Header().Set("Content-Length", fileSize)
	err = blockmgr.Read(repo.StoreID, blkID, rsp)
	if err != nil {
		// Network errors mean the client went away; only log real ones.
		if !isNetworkErr(err) {
			log.Errorf("failed to read block %s: %v", blkID, err)
		}
	}
	sendStatisticMsg(repo.StoreID, user, "web-file-download", uint64(size))
	return nil
}
// accessZipCB serves "/zip/<token>": stream a zip archive of a directory
// or of a selected list of files, depending on the token's operation.
func accessZipCB(rsp http.ResponseWriter, r *http.Request) *appError {
	parts := strings.Split(r.URL.Path[1:], "/")
	if len(parts) != 2 {
		msg := "Invalid URL"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	token := parts[1]
	accessInfo, err := parseWebaccessInfo(token)
	if err != nil {
		return err
	}
	repoID := accessInfo.repoID
	op := accessInfo.op
	user := accessInfo.user
	// For zip tokens the objID field carries a JSON payload describing
	// what to pack (dir name + obj id, or parent dir + file list).
	data := accessInfo.objID
	if op != "download-dir" && op != "download-dir-link" &&
		op != "download-multi" && op != "download-multi-link" {
		msg := "Operation does not match access token"
		return &appError{nil, msg, http.StatusForbidden}
	}
	// Zip content for a given token never changes; answer 304 directly.
	if _, ok := r.Header["If-Modified-Since"]; ok {
		return &appError{nil, "", http.StatusNotModified}
	}
	now := time.Now()
	rsp.Header().Set("Last-Modified", now.Format("Mon, 2 Jan 2006 15:04:05 GMT"))
	rsp.Header().Set("Cache-Control", "max-age=3600")
	if err := downloadZipFile(rsp, r, data, repoID, user, op); err != nil {
		return err
	}
	return nil
}
// downloadZipFile streams a zip archive to the client. For download-dir
// ops it packs one directory (named in the JSON payload); otherwise it
// packs a list of files/dirs under a common parent, de-duplicating names
// with " (N)" suffixes. Errors after streaming has begun can only be
// logged — headers are already sent.
func downloadZipFile(rsp http.ResponseWriter, r *http.Request, data, repoID, user, op string) *appError {
	repo := repomgr.Get(repoID)
	if repo == nil {
		msg := "Failed to get repo"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	var cryptKey *seafileCrypt
	if repo.IsEncrypted {
		key, err := parseCryptKey(rsp, repoID, user, repo.EncVersion)
		if err != nil {
			return err
		}
		cryptKey = key
	}
	obj := make(map[string]interface{})
	err := json.Unmarshal([]byte(data), &obj)
	if err != nil {
		err := fmt.Errorf("failed to parse obj data for zip: %v", err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	ar := zip.NewWriter(rsp)
	defer ar.Close()
	if op == "download-dir" || op == "download-dir-link" {
		dirName, ok := obj["dir_name"].(string)
		if !ok || dirName == "" {
			err := fmt.Errorf("invalid download dir data: miss dir_name field")
			return &appError{err, "", http.StatusInternalServerError}
		}
		objID, ok := obj["obj_id"].(string)
		if !ok || objID == "" {
			err := fmt.Errorf("invalid download dir data: miss obj_id field")
			return &appError{err, "", http.StatusInternalServerError}
		}
		zipName := dirName + ".zip"
		setCommonHeaders(rsp, r, "download", zipName)
		// The zip name downloaded by safari will be garbled if we encode the zip name,
		// because we download zip file using chunk encoding.
		contFileName := fmt.Sprintf("attachment;filename=\"%s\";filename*=utf-8''%s", zipName, url.PathEscape(zipName))
		rsp.Header().Set("Content-Disposition", contFileName)
		rsp.Header().Set("Content-Type", "application/octet-stream")
		err := packDir(ar, repo, objID, dirName, cryptKey)
		if err != nil {
			log.Errorf("failed to pack dir %s: %v", dirName, err)
			return nil
		}
	} else {
		dirList, err := parseDirFilelist(repo, obj)
		if err != nil {
			return &appError{err, "", http.StatusInternalServerError}
		}
		now := time.Now()
		zipName := fmt.Sprintf("documents-export-%d-%d-%d.zip", now.Year(), now.Month(), now.Day())
		setCommonHeaders(rsp, r, "download", zipName)
		// Bug fix: the RFC 5987 charset is "utf-8", not "utf8" — the
		// previous value was ignored by conforming clients (and was
		// inconsistent with the download-dir branch above).
		contFileName := fmt.Sprintf("attachment;filename=\"%s\";filename*=utf-8''%s", zipName, url.PathEscape(zipName))
		rsp.Header().Set("Content-Disposition", contFileName)
		rsp.Header().Set("Content-Type", "application/octet-stream")
		fileList := []string{}
		for _, v := range dirList {
			// Avoid duplicate entry names inside the archive.
			uniqueName := genUniqueFileName(v.Name, fileList)
			fileList = append(fileList, uniqueName)
			if fsmgr.IsDir(v.Mode) {
				if err := packDir(ar, repo, v.ID, uniqueName, cryptKey); err != nil {
					if !isNetworkErr(err) {
						log.Errorf("failed to pack dir %s: %v", v.Name, err)
					}
					return nil
				}
			} else {
				if err := packFiles(ar, &v, repo, "", uniqueName, cryptKey); err != nil {
					if !isNetworkErr(err) {
						log.Errorf("failed to pack file %s: %v", v.Name, err)
					}
					return nil
				}
			}
		}
	}
	return nil
}
// genUniqueFileName returns fileName unchanged if it is not in fileList,
// otherwise the first "stem (N).ext" (or "stem (N)" for extension-less
// names) that does not collide with an entry in fileList.
func genUniqueFileName(fileName string, fileList []string) string {
	dot := strings.LastIndex(fileName, ".")
	stem := fileName
	if dot >= 0 {
		stem = fileName[:dot]
	}
	taken := func(name string) bool {
		for _, existing := range fileList {
			if existing == name {
				return true
			}
		}
		return false
	}
	candidate := fileName
	for seq := 1; taken(candidate); seq++ {
		if dot < 0 {
			candidate = fmt.Sprintf("%s (%d)", stem, seq)
		} else {
			candidate = fmt.Sprintf("%s (%d).%s", stem, seq, fileName[dot+1:])
		}
	}
	return candidate
}
// nameInFileList reports whether fileName already appears in fileList.
// A linear scan is fine: the list holds one entry per packed file.
func nameInFileList(fileName string, fileList []string) bool {
	for i := range fileList {
		if fileList[i] == fileName {
			return true
		}
	}
	return false
}
// parseDirFilelist resolves the "download-multi" JSON payload into a list
// of dirents: obj must carry "parent_dir" and "file_list"; each entry is
// looked up either directly in the parent dir or, when it contains a "/",
// by full path relative to the parent dir.
func parseDirFilelist(repo *repomgr.Repo, obj map[string]interface{}) ([]fsmgr.SeafDirent, error) {
	parentDir, ok := obj["parent_dir"].(string)
	if !ok || parentDir == "" {
		err := fmt.Errorf("invalid download multi data, miss parent_dir field")
		return nil, err
	}
	dir, err := fsmgr.GetSeafdirByPath(repo.StoreID, repo.RootID, parentDir)
	if err != nil {
		err := fmt.Errorf("failed to get dir %s repo %s", parentDir, repo.StoreID)
		return nil, err
	}
	fileList, ok := obj["file_list"].([]interface{})
	if !ok || fileList == nil {
		err := fmt.Errorf("invalid download multi data, miss file_list field")
		return nil, err
	}
	// Index the parent dir's entries by name for O(1) lookups below.
	direntHash := make(map[string]fsmgr.SeafDirent)
	for _, v := range dir.Entries {
		direntHash[v.Name] = *v
	}
	direntList := make([]fsmgr.SeafDirent, 0)
	for _, fileName := range fileList {
		name, ok := fileName.(string)
		if !ok {
			err := fmt.Errorf("invalid download multi data")
			return nil, err
		}
		if name == "" {
			err := fmt.Errorf("invalid download file name")
			return nil, err
		}
		if strings.Contains(name, "/") {
			// Nested path: resolve it against the repo tree directly.
			rpath := filepath.Join(parentDir, name)
			dent, err := fsmgr.GetDirentByPath(repo.StoreID, repo.RootID, rpath)
			if err != nil {
				err := fmt.Errorf("failed to get path %s for repo %s: %v", rpath, repo.StoreID, err)
				return nil, err
			}
			direntList = append(direntList, *dent)
		} else {
			v, ok := direntHash[name]
			if !ok {
				err := fmt.Errorf("invalid download multi data")
				return nil, err
			}
			direntList = append(direntList, v)
		}
	}
	return direntList, nil
}
func packDir(ar *zip.Writer, repo *repomgr.Repo, dirID, dirPath string, cryptKey *seafileCrypt) error {
dirent, err := fsmgr.GetSeafdir(repo.StoreID, dirID)
if err != nil {
err := fmt.Errorf("failed to get dir for zip: %v", err)
return err
}
if dirent.Entries == nil {
fileDir := filepath.Join(dirPath)
fileDir = strings.TrimLeft(fileDir, "/")
_, err := ar.Create(fileDir + "/")
if err != nil {
err := fmt.Errorf("failed to create zip dir: %v", err)
return err
}
return nil
}
entries := dirent.Entries
for _, v := range entries {
fileDir := filepath.Join(dirPath, v.Name)
fileDir = strings.TrimLeft(fileDir, "/")
if fsmgr.IsDir(v.Mode) {
if err := packDir(ar, repo, v.ID, fileDir, cryptKey); err != nil {
return err
}
} else {
if err := packFiles(ar, v, repo, dirPath, v.Name, cryptKey); err != nil {
return err
}
}
}
return nil
}
// packFiles writes one file into the zip archive at parentPath/baseName,
// preserving its mtime and deflate-compressing it. When cryptKey is set
// each block is decrypted before being written.
func packFiles(ar *zip.Writer, dirent *fsmgr.SeafDirent, repo *repomgr.Repo, parentPath, baseName string, cryptKey *seafileCrypt) error {
	file, err := fsmgr.GetSeafile(repo.StoreID, dirent.ID)
	if err != nil {
		err := fmt.Errorf("failed to get seafile : %v", err)
		return err
	}
	filePath := filepath.Join(parentPath, baseName)
	filePath = strings.TrimLeft(filePath, "/")
	fileHeader := new(zip.FileHeader)
	fileHeader.Name = filePath
	fileHeader.Modified = time.Unix(dirent.Mtime, 0)
	fileHeader.Method = zip.Deflate
	zipFile, err := ar.CreateHeader(fileHeader)
	if err != nil {
		err := fmt.Errorf("failed to create zip file : %v", err)
		return err
	}
	if cryptKey != nil {
		// Encrypted repo: read each block fully, decrypt, then write.
		for _, blkID := range file.BlkIDs {
			var buf bytes.Buffer
			// NOTE(review): the Read error is ignored here; a failed read
			// yields an empty buffer passed to decrypt — confirm intended.
			blockmgr.Read(repo.StoreID, blkID, &buf)
			decoded, err := cryptKey.decrypt(buf.Bytes())
			if err != nil {
				err := fmt.Errorf("failed to decrypt block %s: %v", blkID, err)
				return err
			}
			_, err = zipFile.Write(decoded)
			if err != nil {
				return err
			}
		}
		return nil
	}
	// Unencrypted repo: stream blocks straight into the zip entry.
	for _, blkID := range file.BlkIDs {
		err := blockmgr.Read(repo.StoreID, blkID, zipFile)
		if err != nil {
			return err
		}
	}
	return nil
}
// recvData aggregates the state parsed from an upload request.
type recvData struct {
	// parentDir is the upload directory bound to the access token.
	parentDir string
	// tokenType distinguishes e.g. "upload-link" from regular uploads.
	tokenType string
	repoID    string
	user      string
	// rstart/rend/fsize describe the Content-Range of a resumable chunked
	// upload; rstart is negative for non-chunked uploads.
	rstart int64
	rend   int64
	fsize  int64
	// fileNames/files/fileHeaders hold the received files (names,
	// temp-file paths, and multipart headers respectively).
	fileNames   []string
	files       []string
	fileHeaders []*multipart.FileHeader
}
// uploadAPICB handles API uploads (non-Ajax variant of the upload
// endpoint); CORS preflight requests are answered immediately.
func uploadAPICB(rsp http.ResponseWriter, r *http.Request) *appError {
	if r.Method == "OPTIONS" {
		setAccessControl(rsp)
		rsp.WriteHeader(http.StatusOK)
		return nil
	}
	fsm, appErr := parseUploadHeaders(r)
	if appErr != nil {
		formatJSONError(rsp, appErr)
		return appErr
	}
	if appErr := doUpload(rsp, r, fsm, false); appErr != nil {
		formatJSONError(rsp, appErr)
		return appErr
	}
	return nil
}
func setAccessControl(rsp http.ResponseWriter) {
rsp.Header().Set("Access-Control-Allow-Origin", "*")
rsp.Header().Set("Access-Control-Allow-Headers", "x-requested-with, content-type, content-range, content-disposition, accept, origin, authorization")
rsp.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, PATCH, DELETE, OPTIONS")
rsp.Header().Set("Access-Control-Max-Age", "86400")
}
// uploadAjaxCB handles browser (Ajax) uploads; CORS preflight requests
// are answered immediately.
func uploadAjaxCB(rsp http.ResponseWriter, r *http.Request) *appError {
	if r.Method == "OPTIONS" {
		setAccessControl(rsp)
		rsp.WriteHeader(http.StatusOK)
		return nil
	}
	fsm, appErr := parseUploadHeaders(r)
	if appErr != nil {
		formatJSONError(rsp, appErr)
		return appErr
	}
	if appErr := doUpload(rsp, r, fsm, true); appErr != nil {
		formatJSONError(rsp, appErr)
		return appErr
	}
	return nil
}
// formatJSONError converts a non-empty appError message into a JSON error
// body ({"error": "..."}) and sets the JSON content type. The message is
// now encoded with json.Marshal so quotes, backslashes and control
// characters in it no longer produce an invalid JSON body.
func formatJSONError(rsp http.ResponseWriter, err *appError) {
	if err.Message == "" {
		return
	}
	rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
	body, jsonErr := json.Marshal(map[string]string{"error": err.Message})
	if jsonErr != nil {
		// Marshal of map[string]string cannot realistically fail; keep the
		// old format as a last-resort fallback.
		err.Message = fmt.Sprintf("{\"error\": \"%s\"}", err.Message)
		return
	}
	err.Message = string(body)
}
// normalizeUTF8Path returns p normalized to Unicode NFC so that paths
// composed with different code-point sequences compare equal.
func normalizeUTF8Path(p string) string {
	return norm.NFC.String(p)
}
// doUpload processes a multipart upload request: it validates the form
// fields (replace flag, parent dir, optional relative path and mtime),
// handles both chunked (Content-Range) and whole-file uploads, enforces
// quota, and finally commits the files via postMultiFiles. On success a
// statistics message is emitted with the uploaded byte count.
func doUpload(rsp http.ResponseWriter, r *http.Request, fsm *recvData, isAjax bool) *appError {
	setAccessControl(rsp)
	if err := r.ParseMultipartForm(1 << 20); err != nil {
		return &appError{nil, "", http.StatusBadRequest}
	}
	defer r.MultipartForm.RemoveAll()
	repoID := fsm.repoID
	user := fsm.user
	// "replace" is an optional 0/1 flag: overwrite an existing file of
	// the same name instead of creating a deduplicated copy.
	replaceStr := r.FormValue("replace")
	var replaceExisted bool
	if replaceStr != "" {
		replace, err := strconv.ParseInt(replaceStr, 10, 64)
		if err != nil || (replace != 0 && replace != 1) {
			msg := "Invalid argument replace.\n"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		if replace == 1 {
			replaceExisted = true
		}
	}
	parentDir := normalizeUTF8Path(r.FormValue("parent_dir"))
	if parentDir == "" {
		msg := "No parent_dir given.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	// Optional client-supplied mtime (RFC 3339); silently ignored when
	// unparsable.
	lastModifyStr := normalizeUTF8Path(r.FormValue("last_modify"))
	var lastModify int64
	if lastModifyStr != "" {
		t, err := time.Parse(time.RFC3339, lastModifyStr)
		if err == nil {
			lastModify = t.Unix()
		}
	}
	relativePath := normalizeUTF8Path(r.FormValue("relative_path"))
	if relativePath != "" {
		if relativePath[0] == '/' || relativePath[0] == '\\' {
			msg := "Invalid relative path"
			return &appError{nil, msg, http.StatusBadRequest}
		}
	}
	newParentDir := filepath.Join("/", parentDir, relativePath)
	defer clearTmpFile(fsm, newParentDir)
	if fsm.rstart >= 0 {
		// Chunked (resumable) upload: append this chunk to the temp file;
		// only the final chunk (rend == fsize-1) proceeds to commit.
		if parentDir[0] != '/' {
			msg := "Invalid parent dir"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		formFiles := r.MultipartForm.File
		files, ok := formFiles["file"]
		if !ok {
			msg := "No file in multipart form.\n"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		if len(files) > 1 {
			msg := "More files in one request"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		err := writeBlockDataToTmpFile(r, fsm, formFiles, repoID, newParentDir)
		if err != nil {
			msg := "Internal error.\n"
			err := fmt.Errorf("failed to write block data to tmp file: %v", err)
			return &appError{err, msg, http.StatusInternalServerError}
		}
		if fsm.rend != fsm.fsize-1 {
			// Intermediate chunk: acknowledge and wait for the rest.
			rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
			success := "{\"success\": true}"
			rsp.Write([]byte(success))
			return nil
		}
	} else {
		// Whole-file upload: collect every attached file.
		formFiles := r.MultipartForm.File
		fileHeaders, ok := formFiles["file"]
		if !ok {
			msg := "No file in multipart form.\n"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		for _, handler := range fileHeaders {
			fileName := filepath.Base(handler.Filename)
			fsm.fileNames = append(fsm.fileNames, normalizeUTF8Path(fileName))
			fsm.fileHeaders = append(fsm.fileHeaders, handler)
		}
	}
	if fsm.fileNames == nil {
		msg := "No file uploaded.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	if err := checkParentDir(repoID, parentDir); err != nil {
		return err
	}
	// The target dir must match the one the upload token was issued for.
	if !isParentMatched(fsm.parentDir, parentDir) {
		msg := "Parent dir doesn't match."
		return &appError{nil, msg, http.StatusForbidden}
	}
	if err := checkTmpFileList(fsm); err != nil {
		return err
	}
	// Determine the byte count for the quota check: the declared chunked
	// size if available, otherwise the request's Content-Length.
	var contentLen int64
	if fsm.fsize > 0 {
		contentLen = fsm.fsize
	} else {
		lenstr := r.Header.Get("Content-Length")
		if lenstr == "" {
			contentLen = -1
		} else {
			tmpLen, err := strconv.ParseInt(lenstr, 10, 64)
			if err != nil {
				msg := "Internal error.\n"
				err := fmt.Errorf("failed to parse content len: %v", err)
				return &appError{err, msg, http.StatusInternalServerError}
			}
			contentLen = tmpLen
		}
	}
	ret, err := checkQuota(repoID, contentLen)
	if err != nil {
		msg := "Internal error.\n"
		err := fmt.Errorf("failed to check quota: %v", err)
		return &appError{err, msg, http.StatusInternalServerError}
	}
	if ret == 1 {
		msg := "Out of quota.\n"
		return &appError{nil, msg, seafHTTPResNoQuota}
	}
	// Create any missing intermediate dirs of relative_path, then commit.
	if err := createRelativePath(repoID, parentDir, relativePath, user); err != nil {
		return err
	}
	if err := postMultiFiles(rsp, r, repoID, newParentDir, user, fsm,
		replaceExisted, lastModify, isAjax); err != nil {
		return err
	}
	oper := "web-file-upload"
	if fsm.tokenType == "upload-link" {
		oper = "link-file-upload"
	}
	sendStatisticMsg(repoID, user, oper, uint64(contentLen))
	return nil
}
// writeBlockDataToTmpFile appends one chunk of a resumable upload to a
// temporary file in the cluster-shared httptemp directory, at the byte
// offset given by fsm.rstart. A new temp file is created and registered
// (via repomgr) on the first chunk; subsequent chunks reopen the recorded
// file. When the final chunk arrives (rend == fsize-1) the file name and
// tmp path are recorded on fsm for later indexing.
func writeBlockDataToTmpFile(r *http.Request, fsm *recvData, formFiles map[string][]*multipart.FileHeader,
	repoID, parentDir string) error {
	httpTempDir := filepath.Join(absDataDir, "httptemp")

	fileHeaders, ok := formFiles["file"]
	if !ok {
		return fmt.Errorf("failed to get file from multipart form")
	}
	filename, err := getFileNameFromMimeHeader(r)
	if err != nil {
		return fmt.Errorf("failed to get filename from mime header: %w", err)
	}

	handler := fileHeaders[0]
	file, err := handler.Open()
	if err != nil {
		return fmt.Errorf("failed to open file for read: %v", err)
	}
	defer file.Close()

	var f *os.File
	filePath := filepath.Join("/", parentDir, filename)
	tmpFile, err := repomgr.GetUploadTmpFile(repoID, filePath)
	if err != nil || tmpFile == "" {
		// First chunk: create a fresh temp file and record it so later
		// chunks (possibly handled by another node) find the same file.
		tmpDir := filepath.Join(httpTempDir, "cluster-shared")
		f, err = os.CreateTemp(tmpDir, filename)
		if err != nil {
			return err
		}
		repomgr.AddUploadTmpFile(repoID, filePath, f.Name())
		tmpFile = f.Name()
	} else {
		f, err = os.OpenFile(tmpFile, os.O_WRONLY|os.O_CREATE, 0666)
		if err != nil {
			return err
		}
	}

	if fsm.rend == fsm.fsize-1 {
		// Last chunk: remember the final name and tmp path for indexing.
		fsm.fileNames = append(fsm.fileNames, filepath.Base(filename))
		fsm.files = append(fsm.files, tmpFile)
	}

	// Fix: the original discarded the errors of Seek/Copy/Close, so a
	// short or failed write produced a corrupted temp file that was later
	// indexed as if it were complete.
	if _, err := f.Seek(fsm.rstart, 0); err != nil {
		f.Close()
		return fmt.Errorf("failed to seek tmp file: %v", err)
	}
	if _, err := io.Copy(f, file); err != nil {
		f.Close()
		return fmt.Errorf("failed to write block data to tmp file: %v", err)
	}
	if err := f.Close(); err != nil {
		return fmt.Errorf("failed to close tmp file: %v", err)
	}
	return nil
}
// getFileNameFromMimeHeader extracts the "filename" parameter from the
// request's Content-Disposition header, URL-unescapes it, and returns it
// passed through normalizeUTF8Path.
func getFileNameFromMimeHeader(r *http.Request) (string, error) {
	disposition := r.Header.Get("Content-Disposition")
	if disposition == "" {
		return "", fmt.Errorf("missing content disposition")
	}

	_, params, err := mime.ParseMediaType(disposition)
	if err != nil {
		return "", fmt.Errorf("failed to parse Content-Disposition: %v", err)
	}

	filename, err := url.QueryUnescape(params["filename"])
	if err != nil {
		return "", fmt.Errorf("failed to get filename: %v", err)
	}

	return normalizeUTF8Path(filename), nil
}
// createRelativePath creates relativePath (with any missing intermediate
// directories) under parentDir in the repo. It is a no-op when relativePath
// is empty.
func createRelativePath(repoID, parentDir, relativePath, user string) *appError {
	if relativePath == "" {
		return nil
	}

	if err := mkdirWithParents(repoID, parentDir, relativePath, user); err != nil {
		e := fmt.Errorf("Failed to create parent directory: %v", err)
		return &appError{e, "Internal error.\n", http.StatusInternalServerError}
	}

	return nil
}
// mkdirWithParents creates newDirPath (a relative, possibly multi-level
// directory path) under parentDir in the given repo, creating any missing
// intermediate directories, and commits the change. Returns nil without
// committing when every component already exists.
func mkdirWithParents(repoID, parentDir, newDirPath, user string) error {
	repo := repomgr.Get(repoID)
	if repo == nil {
		err := fmt.Errorf("failed to get repo %s", repoID)
		return err
	}
	headCommit, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
	if err != nil {
		err := fmt.Errorf("failed to get head commit for repo %s", repo.ID)
		return err
	}
	relativeDirCan := getCanonPath(newDirPath)
	subFolders := strings.Split(relativeDirCan, "/")
	// Skip empty components; reject invalid names ("..", non-UTF8,
	// over-long, containing a separator).
	for _, name := range subFolders {
		if name == "" {
			continue
		}
		if shouldIgnoreFile(name) {
			err := fmt.Errorf("invalid dir name %s", name)
			return err
		}
	}
	var rootID string
	var parentDirCan string
	if parentDir == "/" || parentDir == "\\" {
		parentDirCan = "/"
	} else {
		parentDirCan = getCanonPath(parentDir)
	}
	// absPath is the first missing path component ("" when everything
	// already exists); dirID is the fs subtree built for the components
	// below it.
	absPath, dirID, err := checkAndCreateDir(repo, headCommit.RootID, parentDirCan, subFolders)
	if err != nil {
		err := fmt.Errorf("failed to check and create dir: %v", err)
		return err
	}
	if absPath == "" {
		// Nothing to create.
		return nil
	}
	newRootID := headCommit.RootID
	mtime := time.Now().Unix()
	mode := (syscall.S_IFDIR | 0644)
	// Dirent for the first missing component, with the pre-built subtree
	// (dirID) as its content.
	dent := fsmgr.NewDirent(dirID, filepath.Base(absPath), uint32(mode), mtime, "", 0)
	var names []string
	rootID, _ = doPostMultiFiles(repo, newRootID, filepath.Dir(absPath), []*fsmgr.SeafDirent{dent}, user, false, &names)
	if rootID == "" {
		err := fmt.Errorf("failed to put dir")
		return err
	}
	buf := fmt.Sprintf("Added directory \"%s\"", relativeDirCan)
	_, err = genNewCommit(repo, headCommit, rootID, user, buf, true, "", false)
	if err != nil {
		err := fmt.Errorf("failed to generate new commit: %v", err)
		return err
	}
	// Propagate the change to virtual repos asynchronously.
	go mergeVirtualRepoPool.AddTask(repo.ID, "")
	return nil
}
// checkAndCreateDir walks subFolders under parentDir in the repo's fs tree
// and pre-builds fs objects for the missing components.
//
// Returns:
//   - absPath: full path of the FIRST missing component, or "" when every
//     component already exists (nothing to do),
//   - dirID: fs ID of the subtree built for the components below the first
//     missing one ("" when only the first missing component remains),
//   - error when parentDir cannot be resolved or fs access fails.
//
// The caller is responsible for creating the first missing component itself,
// using dirID as its content.
func checkAndCreateDir(repo *repomgr.Repo, rootID, parentDir string, subFolders []string) (string, string, error) {
	storeID := repo.StoreID
	dir, err := fsmgr.GetSeafdirByPath(storeID, rootID, parentDir)
	if err != nil {
		err := fmt.Errorf("parent_dir %s doesn't exist in repo %s", parentDir, storeID)
		return "", "", err
	}
	entries := dir.Entries
	var exists bool
	var absPath string
	var dirList []string
	// Descend one component at a time until a component is missing.
	for i, dirName := range subFolders {
		for _, de := range entries {
			if de.Name == dirName {
				exists = true
				subDir, err := fsmgr.GetSeafdir(storeID, de.ID)
				if err != nil {
					err := fmt.Errorf("failed to get seaf dir: %v", err)
					return "", "", err
				}
				entries = subDir.Entries
				break
			}
		}
		if !exists {
			// First missing component: everything from index i on must be
			// created.
			relativePath := filepath.Join(subFolders[:i+1]...)
			absPath = filepath.Join(parentDir, relativePath)
			dirList = subFolders[i:]
			break
		}
		exists = false
	}
	// Drop the first missing component; the caller creates it explicitly.
	if dirList != nil {
		dirList = dirList[1:]
	}
	if len(dirList) == 0 {
		return absPath, "", nil
	}
	dirID, err := genDirRecursive(repo, dirList)
	if err != nil {
		err := fmt.Errorf("failed to generate dir recursive: %v", err)
		return "", "", err
	}
	return absPath, dirID, nil
}
// genDirRecursive builds a nested chain of fs directory objects for toPath
// (outermost component first) and returns the fs ID of the directory
// containing toPath[0]. The innermost component is stored as a dirent with
// an empty fs ID.
func genDirRecursive(repo *repomgr.Repo, toPath []string) (string, error) {
	if len(toPath) == 1 {
		// Base case: a directory whose single entry is the last component,
		// with an empty fs ID.
		uniqueName := toPath[0]
		mode := (syscall.S_IFDIR | 0644)
		mtime := time.Now().Unix()
		dent := fsmgr.NewDirent("", uniqueName, uint32(mode), mtime, "", 0)
		newdir, err := fsmgr.NewSeafdir(1, []*fsmgr.SeafDirent{dent})
		if err != nil {
			err := fmt.Errorf("failed to new seafdir: %v", err)
			return "", err
		}
		err = fsmgr.SaveSeafdir(repo.StoreID, newdir)
		if err != nil {
			err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID)
			return "", err
		}
		return newdir.DirID, nil
	}
	// Build the inner chain first, then wrap it in a directory named after
	// the current (outermost) component.
	ret, err := genDirRecursive(repo, toPath[1:])
	if err != nil {
		err := fmt.Errorf("failed to generate dir recursive: %v", err)
		return "", err
	}
	if ret != "" {
		uniqueName := toPath[0]
		mode := (syscall.S_IFDIR | 0644)
		mtime := time.Now().Unix()
		dent := fsmgr.NewDirent(ret, uniqueName, uint32(mode), mtime, "", 0)
		newdir, err := fsmgr.NewSeafdir(1, []*fsmgr.SeafDirent{dent})
		if err != nil {
			err := fmt.Errorf("failed to new seafdir: %v", err)
			return "", err
		}
		err = fsmgr.SaveSeafdir(repo.StoreID, newdir)
		if err != nil {
			err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID)
			return "", err
		}
		ret = newdir.DirID
	}
	return ret, nil
}
// clearTmpFile deletes the temp file of a chunked upload and drops its
// registration, but only once the final chunk (rend == fsize-1) has been
// received. Callers typically invoke it via defer.
func clearTmpFile(fsm *recvData, parentDir string) {
	if fsm.rstart < 0 || fsm.rend != fsm.fsize-1 {
		return
	}

	filePath := filepath.Join("/", parentDir, fsm.fileNames[0])
	tmpFile, err := repomgr.GetUploadTmpFile(fsm.repoID, filePath)
	if err == nil && tmpFile != "" {
		os.Remove(tmpFile)
	}
	repomgr.DelUploadTmpFile(fsm.repoID, filePath)
}
// parseUploadHeaders validates an upload request before its body is read:
// it parses the access token out of the URL, verifies repo status and that
// the URL operation matches the token, extracts parent_dir from the token
// payload, parses the Content-Range header for resumable uploads, and runs
// early quota/size checks based on Content-Length.
func parseUploadHeaders(r *http.Request) (*recvData, *appError) {
	// URL shape: /<op>/<token>...; the token is a fixed-length (36-char) ID.
	tokenLen := 36
	parts := strings.Split(r.URL.Path[1:], "/")
	if len(parts) < 2 {
		msg := "Invalid URL"
		return nil, &appError{nil, msg, http.StatusBadRequest}
	}
	urlOp := parts[0]
	if len(parts[1]) < tokenLen {
		msg := "Invalid URL"
		return nil, &appError{nil, msg, http.StatusBadRequest}
	}
	token := parts[1][:tokenLen]
	accessInfo, appErr := parseWebaccessInfo(token)
	if appErr != nil {
		return nil, appErr
	}
	repoID := accessInfo.repoID
	op := accessInfo.op
	user := accessInfo.user
	id := accessInfo.objID
	status, err := repomgr.GetRepoStatus(repoID)
	if err != nil {
		return nil, &appError{err, "", http.StatusInternalServerError}
	}
	// -1 means no status record exists; treat that as writable.
	if status != repomgr.RepoStatusNormal && status != -1 {
		msg := "Repo status not writable."
		return nil, &appError{nil, msg, http.StatusBadRequest}
	}
	// Upload-link tokens authorize the same operations as upload tokens.
	if op == "upload-link" {
		op = "upload"
	}
	// The URL operation must begin with the token's operation.
	if strings.Index(urlOp, op) != 0 {
		msg := "Operation does not match access token."
		return nil, &appError{nil, msg, http.StatusForbidden}
	}
	fsm := new(recvData)
	if op != "update" {
		// For uploads, the token's object ID field is a JSON payload that
		// carries the target parent directory.
		obj := make(map[string]interface{})
		if err := json.Unmarshal([]byte(id), &obj); err != nil {
			err := fmt.Errorf("failed to decode obj data : %v", err)
			return nil, &appError{err, "", http.StatusInternalServerError}
		}
		parentDir, ok := obj["parent_dir"].(string)
		if !ok || parentDir == "" {
			err := fmt.Errorf("no parent_dir in access token")
			return nil, &appError{err, "", http.StatusInternalServerError}
		}
		fsm.parentDir = parentDir
	}
	fsm.tokenType = accessInfo.op
	fsm.repoID = repoID
	fsm.user = user
	// -1 marks "no Content-Range header", i.e. a non-resumable upload.
	fsm.rstart = -1
	fsm.rend = -1
	fsm.fsize = -1
	ranges := r.Header.Get("Content-Range")
	if ranges != "" {
		parseContentRange(ranges, fsm)
	}
	var contentLen int64
	lenstr := r.Header.Get("Content-Length")
	if lenstr != "" {
		conLen, _ := strconv.ParseInt(lenstr, 10, 64)
		contentLen = conLen
		if contentLen < 0 {
			contentLen = 0
		}
		// For chunked uploads, check quota/size against the whole file,
		// not just this chunk.
		if fsm.fsize > 0 {
			contentLen = fsm.fsize
		}
	}
	if err := checkQuotaByContentLength(r, repoID, contentLen); err != nil {
		return nil, err
	}
	if err := checkFileSizeByContentLength(r, contentLen); err != nil {
		return nil, err
	}
	return fsm, nil
}
// checkQuotaByContentLength performs an early quota check based on the
// declared Content-Length, before the request body is received, so a
// clearly over-quota upload can be rejected without reading the body.
// The quota is checked again after the body has been received, because a
// missing Content-Length makes this early check inaccurate.
func checkQuotaByContentLength(r *http.Request, repoID string, contentLen int64) *appError {
	if r.Method != "PUT" && r.Method != "POST" {
		return nil
	}

	ret, err := checkQuota(repoID, contentLen)
	if err != nil {
		e := fmt.Errorf("failed to check quota: %v", err)
		return &appError{e, "Internal error.\n", http.StatusInternalServerError}
	}
	if ret == 1 {
		return &appError{nil, "Out of quota.\n", seafHTTPResNoQuota}
	}

	return nil
}
// checkFileSizeByContentLength rejects PUT/POST uploads whose declared
// length exceeds the configured maximum upload size (0 disables the limit).
func checkFileSizeByContentLength(r *http.Request, contentLen int64) *appError {
	if r.Method != "PUT" && r.Method != "POST" {
		return nil
	}
	if option.MaxUploadSize == 0 || uint64(contentLen) <= option.MaxUploadSize {
		return nil
	}
	return &appError{nil, "File size is too large.\n", seafHTTPResTooLarge}
}
// postMultiFiles indexes the uploaded files into block/fs objects and
// commits them under parentDir, then writes the reply to rsp: JSON when
// ret-json or isAjax is set, otherwise a tab-separated list of file IDs.
func postMultiFiles(rsp http.ResponseWriter, r *http.Request, repoID, parentDir, user string, fsm *recvData, replace bool, lastModify int64, isAjax bool) *appError {
	fileNames := fsm.fileNames
	files := fsm.files
	repo := repomgr.Get(repoID)
	if repo == nil {
		msg := "Failed to get repo.\n"
		err := fmt.Errorf("Failed to get repo %s", repoID)
		return &appError{err, msg, http.StatusInternalServerError}
	}
	canonPath := getCanonPath(parentDir)
	// When not replacing, refuse if no unique "name (N)" variant can be
	// generated for some file.
	if !replace && checkFilesWithSameName(repo, canonPath, fileNames) {
		msg := "Too many files with same name.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	for _, fileName := range fileNames {
		if shouldIgnoreFile(fileName) {
			msg := fmt.Sprintf("invalid fileName: %s.\n", fileName)
			return &appError{nil, msg, http.StatusBadRequest}
		}
	}
	if strings.Contains(parentDir, "//") {
		msg := "parent_dir contains // sequence.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	var cryptKey *seafileCrypt
	if repo.IsEncrypted {
		key, err := parseCryptKey(rsp, repoID, user, repo.EncVersion)
		if err != nil {
			return err
		}
		cryptKey = key
	}
	// Record the GC id before writing objects so the final branch update
	// can detect a concurrent GC run.
	gcID, err := repomgr.GetCurrentGCID(repo.StoreID)
	if err != nil {
		err := fmt.Errorf("failed to get current gc id for repo %s: %v", repoID, err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	var ids []string
	var sizes []int64
	if fsm.rstart >= 0 {
		// Chunked upload: index the assembled temp files on disk.
		for _, filePath := range files {
			id, size, err := indexBlocks(r.Context(), repo.StoreID, repo.Version, filePath, nil, cryptKey)
			if err != nil {
				if !errors.Is(err, context.Canceled) {
					err := fmt.Errorf("failed to index blocks: %v", err)
					return &appError{err, "", http.StatusInternalServerError}
				}
				return &appError{nil, "", http.StatusInternalServerError}
			}
			ids = append(ids, id)
			sizes = append(sizes, size)
		}
	} else {
		// Plain upload: index directly from the multipart file headers.
		for _, handler := range fsm.fileHeaders {
			id, size, err := indexBlocks(r.Context(), repo.StoreID, repo.Version, "", handler, cryptKey)
			if err != nil {
				if !errors.Is(err, context.Canceled) {
					err := fmt.Errorf("failed to index blocks: %v", err)
					return &appError{err, "", http.StatusInternalServerError}
				}
				return &appError{nil, "", http.StatusInternalServerError}
			}
			ids = append(ids, id)
			sizes = append(sizes, size)
		}
	}
	retStr, err := postFilesAndGenCommit(fileNames, repo.ID, user, canonPath, replace, ids, sizes, lastModify, gcID)
	if err != nil {
		if errors.Is(err, ErrGCConflict) {
			return &appError{nil, "GC Conflict.\n", http.StatusConflict}
		} else {
			err := fmt.Errorf("failed to post files and gen commit: %v", err)
			return &appError{err, "", http.StatusInternalServerError}
		}
	}
	_, ok := r.Form["ret-json"]
	if ok || isAjax {
		rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
		rsp.Write([]byte(retStr))
	} else {
		// Legacy reply format: tab-separated list of the new file IDs,
		// extracted from the JSON produced above.
		var array []map[string]interface{}
		err := json.Unmarshal([]byte(retStr), &array)
		if err != nil {
			msg := "Internal error.\n"
			err := fmt.Errorf("failed to decode data to json: %v", err)
			return &appError{err, msg, http.StatusInternalServerError}
		}
		var ids []string
		for _, v := range array {
			id, ok := v["id"].(string)
			if !ok {
				msg := "Internal error.\n"
				err := fmt.Errorf("failed to assert")
				return &appError{err, msg, http.StatusInternalServerError}
			}
			ids = append(ids, id)
		}
		newIDs := strings.Join(ids, "\t")
		rsp.Write([]byte(newIDs))
	}
	return nil
}
// checkFilesWithSameName reports whether some file name in fileNames has
// exhausted the duplicate-name suffix space under canonPath, i.e. no unique
// "name (N)" variant can be generated for it. Lookup failures are treated
// as "no conflict".
func checkFilesWithSameName(repo *repomgr.Repo, canonPath string, fileNames []string) bool {
	commit, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
	if err != nil {
		return false
	}
	dir, err := fsmgr.GetSeafdirByPath(repo.StoreID, commit.RootID, canonPath)
	if err != nil {
		return false
	}

	for _, fileName := range fileNames {
		if genUniqueName(fileName, dir.Entries) == "" {
			return true
		}
	}
	return false
}
// postFilesAndGenCommit adds the uploaded files (already indexed into fs
// objects identified by ids, with sizes) under canonPath and creates a new
// commit. On a concurrent-update conflict it reloads the head commit and
// retries the whole operation. Returns the JSON reply string describing the
// added files (name/id/size per file).
//
// NOTE(review): the goto-based retry loop below has no upper bound on
// retryCnt; with replace == false (merging disabled) sustained conflicts
// could retry indefinitely — confirm whether a cap is wanted.
func postFilesAndGenCommit(fileNames []string, repoID string, user, canonPath string, replace bool, ids []string, sizes []int64, lastModify int64, lastGCID string) (string, error) {
	// Concurrent updates are auto-merged only in replace mode; otherwise
	// genNewCommit reports ErrConflict and the retry loop below re-runs.
	handleConncurrentUpdate := true
	if !replace {
		handleConncurrentUpdate = false
	}
	repo := repomgr.Get(repoID)
	if repo == nil {
		err := fmt.Errorf("failed to get repo %s", repoID)
		return "", err
	}
	headCommit, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
	if err != nil {
		err := fmt.Errorf("failed to get head commit for repo %s", repo.ID)
		return "", err
	}
	var names []string
	var retryCnt int
	var dents []*fsmgr.SeafDirent
	for i, name := range fileNames {
		// Guard against mismatched list lengths.
		if i > len(ids)-1 || i > len(sizes)-1 {
			break
		}
		mode := (syscall.S_IFREG | 0644)
		mtime := lastModify
		if mtime <= 0 {
			mtime = time.Now().Unix()
		}
		dent := fsmgr.NewDirent(ids[i], name, uint32(mode), mtime, "", sizes[i])
		dents = append(dents, dent)
	}
retry:
	rootID, err := doPostMultiFiles(repo, headCommit.RootID, canonPath, dents, user, replace, &names)
	if err != nil {
		err := fmt.Errorf("failed to post files to %s in repo %s", canonPath, repo.ID)
		return "", err
	}
	var buf string
	if len(fileNames) > 1 {
		buf = fmt.Sprintf("Added \"%s\" and %d more files.", fileNames[0], len(fileNames)-1)
	} else {
		buf = fmt.Sprintf("Added \"%s\".", fileNames[0])
	}
	_, err = genNewCommit(repo, headCommit, rootID, user, buf, handleConncurrentUpdate, lastGCID, true)
	if err != nil {
		if err != ErrConflict {
			err := fmt.Errorf("failed to generate new commit: %w", err)
			return "", err
		}
		// Concurrent update: reload the head and redo the whole post.
		retryCnt++
		/* Sleep random time between 0 and 3 seconds. */
		random := rand.Intn(30) + 1
		log.Debugf("concurrent upload retry :%d", retryCnt)
		time.Sleep(time.Duration(random*100) * time.Millisecond)
		repo = repomgr.Get(repoID)
		if repo == nil {
			err := fmt.Errorf("failed to get repo %s", repoID)
			return "", err
		}
		headCommit, err = commitmgr.Load(repo.ID, repo.HeadCommitID)
		if err != nil {
			err := fmt.Errorf("failed to get head commit for repo %s", repo.ID)
			return "", err
		}
		goto retry
	}
	// Propagate the change to virtual repos asynchronously.
	go mergeVirtualRepoPool.AddTask(repo.ID, "")
	retJSON, err := formatJSONRet(names, ids, sizes)
	if err != nil {
		err := fmt.Errorf("failed to format json data")
		return "", err
	}
	return string(retJSON), nil
}
// formatJSONRet builds the JSON upload reply: a list of
// {"name": ..., "id": ..., "size": ...} objects, one per uploaded file.
// Entries beyond the shortest of the three lists are ignored.
func formatJSONRet(nameList, idList []string, sizeList []int64) ([]byte, error) {
	var array []map[string]interface{}
	for i := range nameList {
		if i >= len(idList) || i >= len(sizeList) {
			break
		}
		obj := map[string]interface{}{
			"name": nameList[i],
			"id":   idList[i],
			"size": sizeList[i],
		}
		array = append(array, obj)
	}

	jsonstr, err := json.Marshal(array)
	if err != nil {
		// Fix: preserve the underlying marshal error instead of discarding it.
		return nil, fmt.Errorf("failed to convert array to json: %v", err)
	}
	return jsonstr, nil
}
// getCanonPath converts backslashes to forward slashes and cleans the
// result (collapsing duplicate separators and trailing slashes).
func getCanonPath(p string) string {
	unified := strings.ReplaceAll(p, "\\", "/")
	return filepath.Join(unified)
}
var (
	// ErrConflict signals that a concurrent update to the same repo
	// prevented the commit from being applied.
	ErrConflict = errors.New("Concurent upload conflict")
	// ErrGCConflict signals that a head-branch update conflicted with a
	// garbage-collection run on the same repo.
	ErrGCConflict = errors.New("GC Conflict")
)
// genNewCommit creates and saves a commit with newRoot on top of base, then
// tries to move the branch head to it. On concurrent updates it retries up
// to 10 times (merging is handled inside genCommitNeedRetry); if
// handleConncurrentUpdate is false it returns ErrConflict instead.
// Returns the commit ID that finally became head — possibly a merge commit
// rather than the one created here.
func genNewCommit(repo *repomgr.Repo, base *commitmgr.Commit, newRoot, user, desc string, handleConncurrentUpdate bool, lastGCID string, checkGC bool) (string, error) {
	var retryCnt int
	repoID := repo.ID
	commit := commitmgr.NewCommit(repoID, base.CommitID, newRoot, user, desc)
	repomgr.RepoToCommit(repo, commit)
	err := commitmgr.Save(commit)
	if err != nil {
		err := fmt.Errorf("failed to add commit: %v", err)
		return "", err
	}
	var commitID string
	maxRetryCnt := 10
	for {
		retry, err := genCommitNeedRetry(repo, base, commit, newRoot, user, handleConncurrentUpdate, &commitID, lastGCID, checkGC)
		if err != nil {
			return "", err
		}
		if !retry {
			break
		}
		if !handleConncurrentUpdate {
			return "", ErrConflict
		}
		if retryCnt < maxRetryCnt {
			/* Sleep random time between 0 and 3 seconds. */
			random := rand.Intn(30) + 1
			time.Sleep(time.Duration(random*100) * time.Millisecond)
			// Reload the repo to pick up the new head before retrying.
			repo = repomgr.Get(repoID)
			if repo == nil {
				err := fmt.Errorf("repo %s doesn't exist", repoID)
				return "", err
			}
			retryCnt++
		} else {
			err := fmt.Errorf("stop updating repo %s after %d retries", repoID, maxRetryCnt)
			return "", err
		}
	}
	return commitID, nil
}
// fastForwardOrMerge moves the repo head to newCommit, merging with the
// current head if it diverged from base, retrying up to 3 times on races.
// If a last-GC id was recorded for this sync token, the branch update also
// checks for a conflicting GC run (and the recorded id is cleared).
func fastForwardOrMerge(user, token string, repo *repomgr.Repo, base, newCommit *commitmgr.Commit) error {
	var retryCnt int
	checkGC, err := repomgr.HasLastGCID(repo.ID, token)
	if err != nil {
		return err
	}
	var lastGCID string
	if checkGC {
		// Consume the recorded GC id: it is only valid for one head update.
		lastGCID, _ = repomgr.GetLastGCID(repo.ID, token)
		repomgr.RemoveLastGCID(repo.ID, token)
	}

	for {
		retry, err := genCommitNeedRetry(repo, base, newCommit, newCommit.RootID, user, true, nil, lastGCID, checkGC)
		if err != nil {
			return err
		}
		if !retry {
			break
		}

		if retryCnt < 3 {
			// Sleep 0.1–1.0 s to de-synchronize concurrent writers.
			random := rand.Intn(10) + 1
			time.Sleep(time.Duration(random*100) * time.Millisecond)
			retryCnt++
		} else {
			err = fmt.Errorf("stop updating repo %s after 3 retries", repo.ID)
			return err
		}
	}

	return nil
}
// genCommitNeedRetry attempts to move the branch head to commit. If the head
// moved since base, the two histories are first merged into a new merge
// commit (with commit as second parent). Return values:
//   - (false, nil): head updated successfully; *commitID is set if non-nil,
//   - (true, nil): the branch update raced with another writer — retry,
//   - (false, err): fatal error, including ErrConflict (merging disabled)
//     and ErrGCConflict (a GC ran concurrently).
func genCommitNeedRetry(repo *repomgr.Repo, base *commitmgr.Commit, commit *commitmgr.Commit, newRoot, user string, handleConncurrentUpdate bool, commitID *string, lastGCID string, checkGC bool) (bool, error) {
	var secondParentID string
	repoID := repo.ID
	var mergeDesc string
	var mergedCommit *commitmgr.Commit
	currentHead, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
	if err != nil {
		err := fmt.Errorf("failed to get head commit for repo %s", repoID)
		return false, err
	}
	if base.CommitID != currentHead.CommitID {
		// The head moved since we started: merge our tree with the current
		// head's tree.
		if !handleConncurrentUpdate {
			return false, ErrConflict
		}
		roots := []string{base.RootID, currentHead.RootID, newRoot}
		opt := new(mergeOptions)
		opt.remoteRepoID = repoID
		opt.remoteHead = commit.CommitID

		err := mergeTrees(repo.StoreID, roots, opt)
		if err != nil {
			err := fmt.Errorf("failed to merge")
			return false, err
		}

		if !opt.conflict {
			mergeDesc = "Auto merge by system"
		} else {
			// Describe the conflicting changes in the commit message.
			mergeDesc = genMergeDesc(repo, opt.mergedRoot, currentHead.RootID, newRoot)
			if mergeDesc == "" {
				mergeDesc = "Auto merge by system"
			}
		}

		secondParentID = commit.CommitID
		mergedCommit = commitmgr.NewCommit(repoID, currentHead.CommitID, opt.mergedRoot, user, mergeDesc)
		repomgr.RepoToCommit(repo, mergedCommit)
		mergedCommit.SecondParentID.SetValid(commit.CommitID)
		mergedCommit.NewMerge = 1
		if opt.conflict {
			mergedCommit.Conflict = 1
		}

		err = commitmgr.Save(mergedCommit)
		if err != nil {
			err := fmt.Errorf("failed to add commit: %v", err)
			return false, err
		}
	} else {
		// Fast-forward: our commit applies directly on the current head.
		mergedCommit = commit
	}

	gcConflict, err := updateBranch(repoID, repo.StoreID, mergedCommit.CommitID, currentHead.CommitID, secondParentID, checkGC, lastGCID)
	if gcConflict {
		// err is ErrGCConflict here; do not retry.
		return false, err
	}
	if err != nil {
		// The head changed under us between Load and update — retry.
		return true, nil
	}

	if commitID != nil {
		*commitID = mergedCommit.CommitID
	}
	return false, nil
}
// genMergeDesc produces a human-readable description of a merge commit by
// diffing the merged root against both parent roots. Returns "" on failure.
func genMergeDesc(repo *repomgr.Repo, mergedRoot, p1Root, p2Root string) string {
	var entries []*diff.DiffEntry
	if err := diff.DiffMergeRoots(repo.StoreID, mergedRoot, p1Root, p2Root, &entries, true); err != nil {
		return ""
	}
	return diff.DiffResultsToDesc(entries)
}
// updateBranch atomically moves the "master" branch of repoID from
// oldCommitID to newCommitID inside a DB transaction. When checkGC is set,
// it first locks the repo's GC id row and fails with ErrGCConflict (and
// gcConflict == true) if a GC ran since lastGCID was recorded. A non-GC
// error return with the head having moved tells the caller to retry.
// After a successful update, post-update hooks run for the new head (and
// for secondParentID, if any).
func updateBranch(repoID, originRepoID, newCommitID, oldCommitID, secondParentID string, checkGC bool, lastGCID string) (gcConflict bool, err error) {
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	trans, err := seafileDB.BeginTx(ctx, nil)
	if err != nil {
		return false, fmt.Errorf("failed to start transaction: %v", err)
	}
	var row *sql.Row
	var sqlStr string
	if checkGC {
		// Lock the GC id row so a concurrent GC cannot change it while we
		// compare it with the id recorded when the upload started.
		sqlStr = "SELECT gc_id FROM GCID WHERE repo_id = ? FOR UPDATE"
		// Virtual repos store their GC id under the origin repo's id.
		if originRepoID == "" {
			row = trans.QueryRowContext(ctx, sqlStr, repoID)
		} else {
			row = trans.QueryRowContext(ctx, sqlStr, originRepoID)
		}
		var gcID sql.NullString
		if err := row.Scan(&gcID); err != nil {
			if err != sql.ErrNoRows {
				trans.Rollback()
				return false, err
			}
		}
		if lastGCID != gcID.String {
			trans.Rollback()
			return true, ErrGCConflict
		}
	}

	var commitID string
	name := "master"
	sqlStr = "SELECT commit_id FROM Branch WHERE name = ? AND repo_id = ? FOR UPDATE"
	row = trans.QueryRowContext(ctx, sqlStr, name, repoID)
	if err := row.Scan(&commitID); err != nil {
		if err != sql.ErrNoRows {
			trans.Rollback()
			return false, err
		}
	}
	if oldCommitID != commitID {
		// Someone else moved the head first; the caller retries.
		trans.Rollback()
		return false, fmt.Errorf("head commit id has changed")
	}

	sqlStr = "UPDATE Branch SET commit_id = ? WHERE name = ? AND repo_id = ?"
	if _, err := trans.ExecContext(ctx, sqlStr, newCommitID, name, repoID); err != nil {
		trans.Rollback()
		return false, err
	}
	// Fix: the original ignored the Commit error; a failed commit means the
	// branch was NOT updated, which must be reported to the caller.
	if err := trans.Commit(); err != nil {
		return false, err
	}

	if secondParentID != "" {
		if err := onBranchUpdated(repoID, secondParentID, false); err != nil {
			return false, err
		}
	}
	if err := onBranchUpdated(repoID, newCommitID, true); err != nil {
		return false, err
	}
	return false, nil
}
// onBranchUpdated runs post-update hooks after a branch move: optionally
// refreshes the cached repo info, notifies the notification server when
// enabled, and publishes an update event for non-virtual repos.
func onBranchUpdated(repoID string, commitID string, updateRepoInfo bool) error {
	if updateRepoInfo {
		if err := repomgr.UpdateRepoInfo(repoID, commitID); err != nil {
			return err
		}
	}

	if option.EnableNotification {
		notifRepoUpdate(repoID, commitID)
	}

	isVirtual, err := repomgr.IsVirtualRepo(repoID)
	if err != nil {
		return err
	}
	if !isVirtual {
		publishUpdateEvent(repoID, commitID)
	}
	return nil
}
// notifEvent is the envelope sent to the notification server.
type notifEvent struct {
	Type    string           `json:"type"`
	Content *repoUpdateEvent `json:"content"`
}

// repoUpdateEvent identifies the repo and its new head commit.
type repoUpdateEvent struct {
	RepoID   string `json:"repo_id"`
	CommitID string `json:"commit_id"`
}
func notifRepoUpdate(repoID string, commitID string) error {
content := new(repoUpdateEvent)
content.RepoID = repoID
content.CommitID = commitID
event := new(notifEvent)
event.Type = "repo-update"
event.Content = content
msg, err := json.Marshal(event)
if err != nil {
log.Errorf("failed to encode repo update event: %v", err)
return err
}
url := fmt.Sprintf("%s/events", option.NotificationURL)
exp := time.Now().Add(time.Second * 300).Unix()
token, err := utils.GenNotifJWTToken(repoID, "", exp)
if err != nil {
log.Errorf("failed to generate jwt token: %v", err)
return err
}
header := map[string][]string{
"Authorization": {"Token " + token},
}
_, _, err = utils.HttpCommon("POST", url, header, bytes.NewReader(msg))
if err != nil {
log.Warnf("failed to send repo update event: %v", err)
return err
}
return nil
}
// doPostMultiFiles adds the given dirents under parentDir (relative to
// rootID) in the repo's fs tree and returns the new root fs ID. The final
// entry names are appended to names.
func doPostMultiFiles(repo *repomgr.Repo, rootID, parentDir string, dents []*fsmgr.SeafDirent, user string, replace bool, names *[]string) (string, error) {
	// Fix: the original indexed parentDir[0] unconditionally, which panics
	// on an empty string; TrimPrefix strips one leading '/' safely.
	parentDir = strings.TrimPrefix(parentDir, "/")

	id, err := postMultiFilesRecursive(repo, rootID, parentDir, user, dents, replace, names)
	if err != nil {
		return "", fmt.Errorf("failed to post multi files: %v", err)
	}
	return id, nil
}
// postMultiFilesRecursive descends the fs tree along toPath (a '/'-separated
// path relative to dirID), inserts dents into the target directory, and
// re-saves every directory on the path bottom-up. Returns the new fs ID for
// the directory identified by dirID, or an error when a path component
// cannot be found.
func postMultiFilesRecursive(repo *repomgr.Repo, dirID, toPath, user string, dents []*fsmgr.SeafDirent, replace bool, names *[]string) (string, error) {
	olddir, err := fsmgr.GetSeafdir(repo.StoreID, dirID)
	if err != nil {
		err := fmt.Errorf("failed to get dir")
		return "", err
	}

	var ret string
	if toPath == "" {
		// Reached the target directory: insert the new entries here and
		// save the modified directory as a new fs object.
		err := addNewEntries(repo, user, &olddir.Entries, dents, replace, names)
		if err != nil {
			err := fmt.Errorf("failed to add new entries: %v", err)
			return "", err
		}
		newdir, err := fsmgr.NewSeafdir(1, olddir.Entries)
		if err != nil {
			err := fmt.Errorf("failed to new seafdir: %v", err)
			return "", err
		}
		err = fsmgr.SaveSeafdir(repo.StoreID, newdir)
		if err != nil {
			err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID)
			return "", err
		}
		return newdir.DirID, nil
	}

	// Split off the first path component; recurse into it with the rest.
	var remain string
	firstName := toPath
	if slash := strings.Index(toPath, "/"); slash >= 0 {
		remain = toPath[slash+1:]
		firstName = toPath[:slash]
	}

	entries := olddir.Entries
	for i, dent := range entries {
		if dent.Name != firstName {
			continue
		}
		id, err := postMultiFilesRecursive(repo, dent.ID, remain, user, dents, replace, names)
		if err != nil {
			err := fmt.Errorf("failed to post dirent %s: %v", dent.Name, err)
			return "", err
		}
		ret = id
		if id != "" {
			// Point this entry at the rebuilt child and bump its mtime.
			entries[i].ID = id
			entries[i].Mtime = time.Now().Unix()
		}
		break
	}

	if ret != "" {
		// A child changed: re-save this directory with the updated entry.
		newdir, err := fsmgr.NewSeafdir(1, entries)
		if err != nil {
			err := fmt.Errorf("failed to new seafdir: %v", err)
			return "", err
		}
		err = fsmgr.SaveSeafdir(repo.StoreID, newdir)
		if err != nil {
			err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID)
			return "", err
		}
		ret = newdir.DirID
	} else {
		// The ret will be an empty string when failed to find parent dir, an error should be returned in such case.
		err := fmt.Errorf("failed to find parent dir for %s", toPath)
		return "", err
	}

	return ret, nil
}
// addNewEntries inserts newDents into oldDents (mutated in place and kept
// sorted) and records the final entry names in names. With replaceExisted,
// an existing entry with the same name is removed first and its name reused;
// otherwise a unique "name (N)" variant is generated. Fails when no unique
// name can be found.
func addNewEntries(repo *repomgr.Repo, user string, oldDents *[]*fsmgr.SeafDirent, newDents []*fsmgr.SeafDirent, replaceExisted bool, names *[]string) error {
	for _, dent := range newDents {
		var replace bool
		var uniqueName string
		if replaceExisted {
			for i, entry := range *oldDents {
				if entry.Name == dent.Name {
					// Remove the old entry; the new one takes its name.
					replace = true
					*oldDents = append((*oldDents)[:i], (*oldDents)[i+1:]...)
					break
				}
			}
		}

		if replace {
			uniqueName = dent.Name
		} else {
			uniqueName = genUniqueName(dent.Name, *oldDents)
		}
		if uniqueName != "" {
			newDent := fsmgr.NewDirent(dent.ID, uniqueName, dent.Mode, dent.Mtime, user, dent.Size)
			*oldDents = append(*oldDents, newDent)
			*names = append(*names, uniqueName)
		} else {
			err := fmt.Errorf("failed to generate unique name for %s", dent.Name)
			return err
		}
	}

	sort.Sort(Dirents(*oldDents))
	return nil
}
// genUniqueName returns a name derived from fileName that does not collide
// with any entry in entries, by inserting " (N)" before the extension.
// It returns "" when no unique variant is found within duplicateNamesCount
// attempts.
func genUniqueName(fileName string, entries []*fsmgr.SeafDirent) string {
	dot := strings.LastIndex(fileName, ".")
	base := fileName
	var ext string
	if dot >= 0 {
		base = fileName[:dot]
		ext = fileName[dot+1:]
	}

	candidate := fileName
	n := 1
	for nameExists(entries, candidate) && n <= duplicateNamesCount {
		if dot < 0 {
			candidate = fmt.Sprintf("%s (%d)", base, n)
		} else {
			candidate = fmt.Sprintf("%s (%d).%s", base, n, ext)
		}
		n++
	}

	if n <= duplicateNamesCount {
		return candidate
	}
	return ""
}
// nameExists reports whether fileName matches the name of any dirent in
// entries.
func nameExists(entries []*fsmgr.SeafDirent, fileName string) bool {
	for i := range entries {
		if entries[i].Name == fileName {
			return true
		}
	}
	return false
}
// shouldIgnore reports whether fileName contains a ".." path component,
// which could be used to escape the intended directory.
func shouldIgnore(fileName string) bool {
	for _, component := range strings.Split(fileName, "/") {
		if component == ".." {
			return true
		}
	}
	return false
}
// shouldIgnoreFile reports whether fileName is unacceptable as a file name:
// it contains a ".." component, is not valid UTF-8, is 256 bytes or longer,
// or contains a path separator.
func shouldIgnoreFile(fileName string) bool {
	switch {
	case shouldIgnore(fileName):
		return true
	case !utf8.ValidString(fileName):
		log.Warnf("file name %s contains non-UTF8 characters, skip", fileName)
		return true
	case len(fileName) >= 256:
		return true
	case strings.Contains(fileName, "/"):
		return true
	default:
		return false
	}
}
// indexBlocks submits a file-indexing request to the shared indexFilePool
// and blocks until the result arrives. Exactly one of filePath (temp file
// on disk, chunked uploads) or handler (multipart part, plain uploads)
// identifies the data. Returns the resulting seafile fs ID and file size.
func indexBlocks(ctx context.Context, repoID string, version int, filePath string, handler *multipart.FileHeader, cryptKey *seafileCrypt) (string, int64, error) {
	req := &indexFileRequest{
		ctx:      ctx,
		repoID:   repoID,
		version:  version,
		filePath: filePath,
		handler:  handler,
		cryptKey: cryptKey,
	}
	// Unbuffered channel: the worker's send completes when we receive.
	recvChan := make(chan *indexFileResult)

	indexFilePool.AddTask(recvChan, req)
	result := <-recvChan

	return result.fileID, result.size, result.err
}
// indexFileRequest describes one file-indexing job submitted to the
// indexFilePool workers.
type indexFileRequest struct {
	ctx      context.Context
	repoID   string
	version  int
	filePath string                // temp file on disk (chunked uploads); "" otherwise
	handler  *multipart.FileHeader // multipart part (plain uploads); nil when filePath is used
	cryptKey *seafileCrypt         // non-nil for encrypted repos
}

// indexFileResult carries the outcome of an indexing job back to the caller.
type indexFileResult struct {
	fileID string
	size   int64
	err    error
}
// indexFileWorker is the worker function run by indexFilePool. It determines
// the file size, feeds fixed-size block jobs to a parallel chunking pool,
// collects the resulting block IDs, writes the seafile fs object, and sends
// the result (file ID + size) on the caller's channel.
// It always returns nil; failures are reported through the result channel.
func indexFileWorker(args ...any) error {
	resChan := args[0].(chan *indexFileResult)
	req := args[1].(*indexFileRequest)
	ctx := req.ctx
	repoID := req.repoID
	version := req.version
	filePath := req.filePath
	handler := req.handler
	cryptKey := req.cryptKey

	// Determine the file size from the multipart header or from disk.
	var size int64
	if handler != nil {
		size = handler.Size
	} else {
		f, err := os.Open(filePath)
		if err != nil {
			err := fmt.Errorf("failed to open file: %s: %v", filePath, err)
			resChan <- &indexFileResult{err: err}
			return nil
		}
		defer f.Close()
		fileInfo, err := f.Stat()
		if err != nil {
			err := fmt.Errorf("failed to stat file %s: %v", filePath, err)
			resChan <- &indexFileResult{err: err}
			return nil
		}
		size = fileInfo.Size()
	}

	if size == 0 {
		// Empty file: no blocks; use the well-known empty-file ID.
		resChan <- &indexFileResult{fileID: fsmgr.EmptySha1, size: 0}
		return nil
	}

	chunkJobs := make(chan chunkingData, 10)
	results := make(chan chunkingResult, 10)
	go createChunkPool(ctx, int(option.MaxIndexingThreads), chunkJobs, results)

	var blkSize int64
	var offset int64

	// Number of fixed-size blocks, rounded up.
	jobNum := (uint64(size) + option.FixedBlockSize - 1) / option.FixedBlockSize
	blkIDs := make([]string, jobNum)

	left := size
	for {
		if uint64(left) >= option.FixedBlockSize {
			blkSize = int64(option.FixedBlockSize)
		} else {
			blkSize = left
		}
		if left > 0 {
			job := chunkingData{repoID, filePath, handler, offset, cryptKey}
			// Submit the next job or consume a finished result, whichever
			// is ready first, to avoid deadlocking on full channels.
			select {
			case chunkJobs <- job:
				left -= blkSize
				offset += blkSize
			case result := <-results:
				if result.err != nil {
					close(chunkJobs)
					// Drain remaining results in the background so the
					// chunking workers can exit.
					go RecoverWrapper(func() {
						for result := range results {
							_ = result
						}
					})
					resChan <- &indexFileResult{err: result.err}
					return nil
				}
				blkIDs[result.idx] = result.blkID
			}
		} else {
			// All jobs submitted: close the job channel and collect the
			// remaining results (results is closed by createChunkPool).
			close(chunkJobs)
			for result := range results {
				if result.err != nil {
					go RecoverWrapper(func() {
						for result := range results {
							_ = result
						}
					})
					resChan <- &indexFileResult{err: result.err}
					return nil
				}
				blkIDs[result.idx] = result.blkID
			}
			break
		}
	}

	fileID, err := writeSeafile(repoID, version, size, blkIDs)
	if err != nil {
		err := fmt.Errorf("failed to write seafile: %v", err)
		resChan <- &indexFileResult{err: err}
		return nil
	}

	resChan <- &indexFileResult{fileID: fileID, size: size}
	return nil
}
// writeSeafile builds the seafile fs object describing the file's blocks,
// persists it, and returns the resulting file ID.
func writeSeafile(repoID string, version int, fileSize int64, blkIDs []string) (string, error) {
	seafile, err := fsmgr.NewSeafile(version, fileSize, blkIDs)
	if err != nil {
		return "", fmt.Errorf("failed to new seafile: %v", err)
	}

	if err := fsmgr.SaveSeafile(repoID, seafile); err != nil {
		return "", fmt.Errorf("failed to save seafile %s/%s", repoID, seafile.FileID)
	}

	return seafile.FileID, nil
}
// chunkingData describes one fixed-size block to read and store.
type chunkingData struct {
	repoID   string
	filePath string                // temp file on disk; "" when handler is used
	handler  *multipart.FileHeader // multipart part; nil when filePath is used
	offset   int64                 // byte offset of this block within the file
	cryptKey *seafileCrypt         // non-nil for encrypted repos
}

// chunkingResult reports one stored block: its index in the file's block
// list, its block ID, or an error.
type chunkingResult struct {
	idx   int64
	blkID string
	err   error
}
// createChunkPool starts n chunking workers consuming from chunkJobs and
// closes res once they have all finished, so readers of res can range over
// it to completion.
func createChunkPool(ctx context.Context, n int, chunkJobs chan chunkingData, res chan chunkingResult) {
	defer func() {
		if r := recover(); r != nil {
			log.Errorf("panic: %v\n%s", r, debug.Stack())
		}
	}()

	var wg sync.WaitGroup
	wg.Add(n)
	for i := 0; i < n; i++ {
		go chunkingWorker(ctx, &wg, chunkJobs, res)
	}
	wg.Wait()
	close(res)
}
// chunkingWorker consumes chunking jobs until chunkJobs is closed or the
// context is canceled, writing one chunkingResult per job to res.
func chunkingWorker(ctx context.Context, wg *sync.WaitGroup, chunkJobs chan chunkingData, res chan chunkingResult) {
	// Registered first so it runs last: Done must execute even if a worker
	// panics, otherwise createChunkPool's Wait blocks forever and the
	// results channel is never closed (hanging the whole upload).
	defer wg.Done()
	defer func() {
		if err := recover(); err != nil {
			log.Errorf("panic: %v\n%s", err, debug.Stack())
		}
	}()
	for job := range chunkJobs {
		// Stop promptly when the request is canceled; idx -1 marks the
		// result as a cancellation rather than a real block.
		select {
		case <-ctx.Done():
			res <- chunkingResult{-1, "", context.Canceled}
			return
		default:
		}
		blkID, err := chunkFile(job)
		idx := job.offset / int64(option.FixedBlockSize)
		res <- chunkingResult{idx, blkID, err}
	}
}
// chunkFile reads one fixed-size block (at job.offset) from the uploaded
// file — either the multipart form file or the temp file on disk — and
// writes it to the block store, returning the block ID.
func chunkFile(job chunkingData) (string, error) {
	repoID := job.repoID
	offset := job.offset
	filePath := job.filePath
	handler := job.handler
	blkSize := option.FixedBlockSize
	cryptKey := job.cryptKey
	var file multipart.File
	if handler != nil {
		f, err := handler.Open()
		if err != nil {
			err := fmt.Errorf("failed to open file for read: %v", err)
			return "", err
		}
		defer f.Close()
		file = f
	} else {
		f, err := os.Open(filePath)
		if err != nil {
			err := fmt.Errorf("failed to open file for read: %v", err)
			return "", err
		}
		defer f.Close()
		file = f
	}
	_, err := file.Seek(offset, io.SeekStart)
	if err != nil {
		err := fmt.Errorf("failed to seek file: %v", err)
		return "", err
	}
	buf := make([]byte, blkSize)
	// Use ReadFull: a single Read is allowed to return fewer bytes than
	// requested, which would silently produce an undersized (corrupt) block
	// in the middle of the file. The last block is legitimately short, so
	// EOF/ErrUnexpectedEOF are tolerated here.
	n, err := io.ReadFull(file, buf)
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		// Previous message said "failed to seek file" — this is a read error.
		err := fmt.Errorf("failed to read file: %v", err)
		return "", err
	}
	buf = buf[:n]
	blkID, err := writeChunk(repoID, buf, int64(n), cryptKey)
	if err != nil {
		err := fmt.Errorf("failed to write chunk: %v", err)
		return "", err
	}
	return blkID, nil
}
// writeChunk stores one block in repo repoID, encrypting it first when the
// repo is encrypted. The block ID is the SHA-1 of the (possibly encrypted)
// content; if a block with that ID already exists the write is skipped.
func writeChunk(repoID string, input []byte, blkSize int64, cryptKey *seafileCrypt) (string, error) {
	data := input
	if cryptKey != nil && blkSize > 0 {
		encoded, encErr := cryptKey.encrypt(input)
		if encErr != nil {
			return "", fmt.Errorf("failed to encrypt block: %v", encErr)
		}
		data = encoded
	}
	checkSum := sha1.Sum(data)
	blkID := hex.EncodeToString(checkSum[:])
	// Content-addressed store: an existing ID means identical content.
	if blockmgr.Exists(repoID, blkID) {
		return blkID, nil
	}
	if err := blockmgr.Write(repoID, blkID, bytes.NewReader(data)); err != nil {
		return "", fmt.Errorf("failed to write block: %v", err)
	}
	return blkID, nil
}
// checkTmpFileList verifies that the total size of the received upload does
// not exceed the configured maximum upload size.
func checkTmpFileList(fsm *recvData) *appError {
	var totalSize int64
	if fsm.rstart >= 0 {
		// Resumable upload: sizes come from the temp files on disk.
		for _, tmpFile := range fsm.files {
			info, statErr := os.Stat(tmpFile)
			if statErr != nil {
				err := fmt.Errorf("[upload] Failed to stat temp file %s", tmpFile)
				return &appError{err, "Internal error.\n", http.StatusInternalServerError}
			}
			totalSize += info.Size()
		}
	} else {
		// Whole-file upload: sizes come from the multipart headers.
		for _, hdr := range fsm.fileHeaders {
			totalSize += hdr.Size
		}
	}
	if option.MaxUploadSize > 0 && uint64(totalSize) > option.MaxUploadSize {
		return &appError{nil, "File size is too large.\n", seafHTTPResTooLarge}
	}
	return nil
}
// checkParentDir verifies that parentDir exists in the current head commit
// of repo repoID, returning an appError suitable for the HTTP response.
func checkParentDir(repoID string, parentDir string) *appError {
	repo := repomgr.Get(repoID)
	if repo == nil {
		return &appError{
			fmt.Errorf("Failed to get repo %s", repoID),
			"Failed to get repo.\n",
			http.StatusInternalServerError,
		}
	}
	commit, loadErr := commitmgr.Load(repoID, repo.HeadCommitID)
	if loadErr != nil {
		return &appError{
			fmt.Errorf("Failed to get head commit for repo %s", repoID),
			"Failed to get head commit.\n",
			http.StatusInternalServerError,
		}
	}
	canonPath := getCanonPath(parentDir)
	if _, err := fsmgr.GetSeafdirByPath(repo.StoreID, commit.RootID, canonPath); err != nil {
		return &appError{nil, "Parent dir doesn't exist.\n", http.StatusBadRequest}
	}
	return nil
}
// isParentMatched reports whether uploadDir and parentDir name the same
// directory after both are canonicalized as absolute slash-rooted paths.
func isParentMatched(uploadDir, parentDir string) bool {
	return filepath.Join("/", uploadDir) == filepath.Join("/", parentDir)
}
// parseContentRange parses a "Content-Range: bytes S-E/T" header value into
// fsm.rstart/rend/fsize. It returns false (leaving fsm untouched) on any
// malformed or inconsistent range.
func parseContentRange(ranges string, fsm *recvData) bool {
	start := strings.Index(ranges, "bytes")
	end := strings.Index(ranges, "-")
	slash := strings.Index(ranges, "/")
	if start < 0 || end < 0 || slash < 0 {
		return false
	}
	parseInt := func(s string) (int64, bool) {
		v, err := strconv.ParseInt(s, 10, 64)
		return v, err == nil
	}
	firstByte, ok := parseInt(strings.TrimLeft(ranges[start+len("bytes"):end], " "))
	if !ok {
		return false
	}
	lastByte, ok := parseInt(ranges[end+1 : slash])
	if !ok {
		return false
	}
	fileSize, ok := parseInt(ranges[slash+1:])
	if !ok {
		return false
	}
	// The range must be non-empty and lie strictly inside the file.
	if firstByte > lastByte || lastByte >= fileSize {
		return false
	}
	fsm.rstart = firstByte
	fsm.rend = lastByte
	fsm.fsize = fileSize
	return true
}
// webaccessInfo holds the fields of a web access token as returned by the
// seafile_web_query_access_token RPC.
type webaccessInfo struct {
	repoID string // repo the token grants access to
	objID  string // ID of the object the token refers to
	op     string // permitted operation (semantics defined by the token issuer)
	user   string // username the token was issued to
}
// parseWebaccessInfo resolves a web access token via RPC and extracts the
// repo-id/obj-id/op/username fields. A missing token yields 403; a malformed
// RPC reply yields 500.
func parseWebaccessInfo(token string) (*webaccessInfo, *appError) {
	webaccess, err := rpcclient.Call("seafile_web_query_access_token", token)
	if err != nil {
		rpcErr := fmt.Errorf("failed to get web access token: %v", err)
		return nil, &appError{rpcErr, "", http.StatusInternalServerError}
	}
	if webaccess == nil {
		// err is necessarily nil on this path, matching the original code.
		return nil, &appError{nil, "Access token not found", http.StatusForbidden}
	}
	webaccessMap, ok := webaccess.(map[string]interface{})
	if !ok {
		return nil, &appError{nil, "", http.StatusInternalServerError}
	}
	// All four fields are required string entries of the reply map.
	field := func(key string) (string, bool) {
		s, present := webaccessMap[key].(string)
		return s, present
	}
	accessInfo := new(webaccessInfo)
	if accessInfo.repoID, ok = field("repo-id"); !ok {
		return nil, &appError{nil, "", http.StatusInternalServerError}
	}
	if accessInfo.objID, ok = field("obj-id"); !ok {
		return nil, &appError{nil, "", http.StatusInternalServerError}
	}
	if accessInfo.op, ok = field("op"); !ok {
		return nil, &appError{nil, "", http.StatusInternalServerError}
	}
	if accessInfo.user, ok = field("username"); !ok {
		return nil, &appError{nil, "", http.StatusInternalServerError}
	}
	return accessInfo, nil
}
// updateDir replaces the directory at dirPath in repo repoID with the dir
// object newDirID and creates a new commit (authored by user) on top of
// headID, or of the current head when headID is empty. It returns the new
// commit ID.
func updateDir(repoID, dirPath, newDirID, user, headID string) (string, error) {
	repo := repomgr.Get(repoID)
	if repo == nil {
		err := fmt.Errorf("failed to get repo %.10s", repoID)
		return "", err
	}
	// Base the new commit on the caller-supplied head when given, so callers
	// can serialize concurrent updates themselves.
	var base string
	if headID == "" {
		base = repo.HeadCommitID
	} else {
		base = headID
	}
	headCommit, err := commitmgr.Load(repo.ID, base)
	if err != nil {
		err := fmt.Errorf("failed to get head commit for repo %s", repo.ID)
		return "", err
	}
	// Updating the root dir: nothing to patch in a parent, commit directly.
	if dirPath == "/" {
		commitDesc := genCommitDesc(repo, newDirID, headCommit.RootID)
		if commitDesc == "" {
			commitDesc = "Auto merge by system"
		}
		newCommitID, err := genNewCommit(repo, headCommit, newDirID, user, commitDesc, true, "", false)
		if err != nil {
			err := fmt.Errorf("failed to generate new commit: %v", err)
			return "", err
		}
		return newCommitID, nil
	}
	parent := filepath.Dir(dirPath)
	canonPath := getCanonPath(parent)
	dirName := filepath.Base(dirPath)
	dir, err := fsmgr.GetSeafdirByPath(repo.StoreID, headCommit.RootID, canonPath)
	if err != nil {
		err := fmt.Errorf("dir %s doesn't exist in repo %s", canonPath, repo.StoreID)
		return "", err
	}
	// updateDir only replaces an existing entry; it never creates one.
	var exists bool
	for _, de := range dir.Entries {
		if de.Name == dirName {
			exists = true
		}
	}
	if !exists {
		err := fmt.Errorf("directory %s doesn't exist in repo %s", dirName, repo.StoreID)
		return "", err
	}
	newDent := fsmgr.NewDirent(newDirID, dirName, (syscall.S_IFDIR | 0644), time.Now().Unix(), "", 0)
	rootID, err := doPutFile(repo, headCommit.RootID, canonPath, newDent)
	if err != nil || rootID == "" {
		err := fmt.Errorf("failed to put file")
		return "", err
	}
	commitDesc := genCommitDesc(repo, rootID, headCommit.RootID)
	if commitDesc == "" {
		commitDesc = "Auto merge by system"
	}
	newCommitID, err := genNewCommit(repo, headCommit, rootID, user, commitDesc, true, "", false)
	if err != nil {
		err := fmt.Errorf("failed to generate new commit: %v", err)
		return "", err
	}
	// Recompute the repo size asynchronously; not needed for correctness here.
	go updateSizePool.AddTask(repoID)
	return newCommitID, nil
}
// genCommitDesc produces a human-readable commit description by diffing the
// old and new root trees; it returns "" when the diff itself fails.
func genCommitDesc(repo *repomgr.Repo, root, parentRoot string) string {
	var results []*diff.DiffEntry
	if err := diff.DiffCommitRoots(repo.StoreID, parentRoot, root, &results, true); err != nil {
		return ""
	}
	return diff.DiffResultsToDesc(results)
}
// doPutFile replaces the dirent named dent.Name under parentDir, rewriting
// the directory objects along the path; it returns the new root ID.
func doPutFile(repo *repomgr.Repo, rootID, parentDir string, dent *fsmgr.SeafDirent) (string, error) {
	// putFileRecursive expects a path relative to the root, so drop one
	// leading slash if present.
	parentDir = strings.TrimPrefix(parentDir, "/")
	return putFileRecursive(repo, rootID, parentDir, dent)
}
// putFileRecursive descends from dirID along toPath and replaces the entry
// named newDent.Name in the target directory with newDent, rewriting and
// saving every directory object on the path. It returns the new ID of the
// directory at this level, or an error when the path or entry is missing.
func putFileRecursive(repo *repomgr.Repo, dirID, toPath string, newDent *fsmgr.SeafDirent) (string, error) {
	olddir, err := fsmgr.GetSeafdir(repo.StoreID, dirID)
	if err != nil {
		err := fmt.Errorf("failed to get dir")
		return "", err
	}
	entries := olddir.Entries
	var ret string
	// Base case: this is the parent directory — swap in the new dirent.
	if toPath == "" {
		var newEntries []*fsmgr.SeafDirent
		for _, dent := range entries {
			if dent.Name == newDent.Name {
				newEntries = append(newEntries, newDent)
			} else {
				newEntries = append(newEntries, dent)
			}
		}
		newdir, err := fsmgr.NewSeafdir(1, newEntries)
		if err != nil {
			err := fmt.Errorf("failed to new seafdir: %v", err)
			return "", err
		}
		err = fsmgr.SaveSeafdir(repo.StoreID, newdir)
		if err != nil {
			err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID)
			return "", err
		}
		return newdir.DirID, nil
	}
	// Split toPath into its first component and the remaining path.
	var remain string
	firstName := toPath
	if slash := strings.Index(toPath, "/"); slash >= 0 {
		remain = toPath[slash+1:]
		firstName = toPath[:slash]
	}
	for _, dent := range entries {
		if dent.Name != firstName {
			continue
		}
		id, err := putFileRecursive(repo, dent.ID, remain, newDent)
		if err != nil {
			err := fmt.Errorf("failed to put dirent %s: %v", dent.Name, err)
			return "", err
		}
		if id != "" {
			// The subtree changed: point this entry at the rewritten subdir.
			dent.ID = id
			dent.Mtime = time.Now().Unix()
		}
		ret = id
		break
	}
	if ret != "" {
		// A descendant was updated, so this directory object changed too;
		// save the new version and propagate its ID upward.
		newdir, err := fsmgr.NewSeafdir(1, entries)
		if err != nil {
			err := fmt.Errorf("failed to new seafdir: %v", err)
			return "", err
		}
		err = fsmgr.SaveSeafdir(repo.StoreID, newdir)
		if err != nil {
			err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID)
			return "", err
		}
		ret = newdir.DirID
	} else {
		err := fmt.Errorf("failed to find parent dir for %s", toPath)
		return "", err
	}
	return ret, nil
}
// updateAPICB is the HTTP handler for the file-update API endpoint. Errors
// are written to the response as JSON and also returned to the router.
func updateAPICB(rsp http.ResponseWriter, r *http.Request) *appError {
	if r.Method == "OPTIONS" {
		// CORS preflight: answer with the access-control headers only.
		setAccessControl(rsp)
		rsp.WriteHeader(http.StatusOK)
		return nil
	}
	fsm, appErr := parseUploadHeaders(r)
	if appErr != nil {
		formatJSONError(rsp, appErr)
		return appErr
	}
	appErr = doUpdate(rsp, r, fsm, false)
	if appErr != nil {
		formatJSONError(rsp, appErr)
	}
	return appErr
}
// updateAjaxCB is the HTTP handler for the Ajax file-update endpoint; it is
// identical to updateAPICB except that doUpdate renders a JSON result body.
func updateAjaxCB(rsp http.ResponseWriter, r *http.Request) *appError {
	if r.Method == "OPTIONS" {
		// CORS preflight: answer with the access-control headers only.
		setAccessControl(rsp)
		rsp.WriteHeader(http.StatusOK)
		return nil
	}
	fsm, appErr := parseUploadHeaders(r)
	if appErr != nil {
		formatJSONError(rsp, appErr)
		return appErr
	}
	appErr = doUpdate(rsp, r, fsm, true)
	if appErr != nil {
		formatJSONError(rsp, appErr)
	}
	return appErr
}
// doUpdate handles a file-update request (API and Ajax variants): it parses
// the multipart form, receives the file content (possibly as resumable
// chunks), validates the target, quota and parent directory, then overwrites
// the target file via putFile. isAjax selects the JSON response format.
func doUpdate(rsp http.ResponseWriter, r *http.Request, fsm *recvData, isAjax bool) *appError {
	setAccessControl(rsp)
	if err := r.ParseMultipartForm(1 << 20); err != nil {
		return &appError{nil, "", http.StatusBadRequest}
	}
	defer r.MultipartForm.RemoveAll()
	repoID := fsm.repoID
	user := fsm.user
	targetFile := normalizeUTF8Path(r.FormValue("target_file"))
	if targetFile == "" {
		msg := "No target_file given.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	// Optional client-supplied mtime (RFC3339); silently ignored if invalid.
	lastModifyStr := normalizeUTF8Path(r.FormValue("last_modify"))
	var lastModify int64
	if lastModifyStr != "" {
		t, err := time.Parse(time.RFC3339, lastModifyStr)
		if err == nil {
			lastModify = t.Unix()
		}
	}
	parentDir := filepath.Dir(targetFile)
	fileName := filepath.Base(targetFile)
	defer clearTmpFile(fsm, parentDir)
	if fsm.rstart >= 0 {
		// Resumable upload: append this chunk to the temp file. Only the
		// final chunk (rend == fsize-1) falls through to commit the file.
		if parentDir[0] != '/' {
			msg := "Invalid parent dir"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		formFiles := r.MultipartForm.File
		files, ok := formFiles["file"]
		if !ok {
			msg := "No file in multipart form.\n"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		if len(files) > 1 {
			msg := "More files in one request"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		err := writeBlockDataToTmpFile(r, fsm, formFiles, repoID, parentDir)
		if err != nil {
			msg := "Internal error.\n"
			err := fmt.Errorf("failed to write block data to tmp file: %v", err)
			return &appError{err, msg, http.StatusInternalServerError}
		}
		if fsm.rend != fsm.fsize-1 {
			// Intermediate chunk: acknowledge and wait for the next one.
			rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
			success := "{\"success\": true}"
			rsp.Write([]byte(success))
			return nil
		}
	} else {
		// Whole-file upload: exactly one form file is expected.
		formFiles := r.MultipartForm.File
		fileHeaders, ok := formFiles["file"]
		if !ok {
			msg := "No file in multipart form.\n"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		if len(fileHeaders) > 1 {
			msg := "More files in one request"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		for _, handler := range fileHeaders {
			fileName := filepath.Base(handler.Filename)
			fsm.fileNames = append(fsm.fileNames, fileName)
			fsm.fileHeaders = append(fsm.fileHeaders, handler)
		}
	}
	if fsm.fileNames == nil {
		msg := "No file.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	if err := checkParentDir(repoID, parentDir); err != nil {
		return err
	}
	if err := checkTmpFileList(fsm); err != nil {
		return err
	}
	// Size for the quota check: prefer the total from the Content-Range
	// header, fall back to Content-Length (-1 when absent).
	var contentLen int64
	if fsm.fsize > 0 {
		contentLen = fsm.fsize
	} else {
		lenstr := r.Header.Get("Content-Length")
		if lenstr == "" {
			contentLen = -1
		} else {
			tmpLen, err := strconv.ParseInt(lenstr, 10, 64)
			if err != nil {
				msg := "Internal error.\n"
				err := fmt.Errorf("failed to parse content len: %v", err)
				return &appError{err, msg, http.StatusInternalServerError}
			}
			contentLen = tmpLen
		}
	}
	ret, err := checkQuota(repoID, contentLen)
	if err != nil {
		msg := "Internal error.\n"
		err := fmt.Errorf("failed to check quota: %v", err)
		return &appError{err, msg, http.StatusInternalServerError}
	}
	if ret == 1 {
		msg := "Out of quota.\n"
		return &appError{nil, msg, seafHTTPResNoQuota}
	}
	// Optional base commit supplied by the client.
	headIDs, ok := r.Form["head"]
	var headID string
	if ok {
		headID = headIDs[0]
	}
	if err := putFile(rsp, r, repoID, parentDir, user, fileName, fsm, headID, lastModify, isAjax); err != nil {
		return err
	}
	oper := "web-file-upload"
	sendStatisticMsg(repoID, user, oper, uint64(contentLen))
	return nil
}
// putFile overwrites the existing file parentDir/fileName in repo repoID
// with the uploaded content (a temp file for resumable uploads, the
// multipart header otherwise), then commits the change. The target file must
// already exist; headID optionally pins the base commit. The response body
// is the new file ID, rendered as JSON when isAjax is true.
func putFile(rsp http.ResponseWriter, r *http.Request, repoID, parentDir, user, fileName string, fsm *recvData, headID string, lastModify int64, isAjax bool) *appError {
	files := fsm.files
	repo := repomgr.Get(repoID)
	if repo == nil {
		msg := "Failed to get repo.\n"
		err := fmt.Errorf("Failed to get repo %s", repoID)
		return &appError{err, msg, http.StatusInternalServerError}
	}
	var base string
	if headID != "" {
		base = headID
	} else {
		base = repo.HeadCommitID
	}
	headCommit, err := commitmgr.Load(repo.ID, base)
	if err != nil {
		msg := "Failed to get head commit.\n"
		err := fmt.Errorf("failed to get head commit for repo %s", repo.ID)
		return &appError{err, msg, http.StatusInternalServerError}
	}
	canonPath := getCanonPath(parentDir)
	if shouldIgnoreFile(fileName) {
		msg := fmt.Sprintf("invalid fileName: %s.\n", fileName)
		return &appError{nil, msg, http.StatusBadRequest}
	}
	if strings.Contains(parentDir, "//") {
		msg := "parent_dir contains // sequence.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	// Update semantics: the target must already exist (an upload would
	// create it instead).
	exist, _ := checkFileExists(repo.StoreID, headCommit.RootID, canonPath, fileName)
	if !exist {
		msg := "File does not exist.\n"
		return &appError{nil, msg, seafHTTPResNotExists}
	}
	var cryptKey *seafileCrypt
	if repo.IsEncrypted {
		key, err := parseCryptKey(rsp, repoID, user, repo.EncVersion)
		if err != nil {
			return err
		}
		cryptKey = key
	}
	// Remember the GC id so genNewCommit can detect a concurrent GC run.
	gcID, err := repomgr.GetCurrentGCID(repo.StoreID)
	if err != nil {
		err := fmt.Errorf("failed to get current gc id: %v", err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	var fileID string
	var size int64
	if fsm.rstart >= 0 {
		// Resumable upload: the content was accumulated in a temp file.
		filePath := files[0]
		id, fileSize, err := indexBlocks(r.Context(), repo.StoreID, repo.Version, filePath, nil, cryptKey)
		if err != nil {
			if !errors.Is(err, context.Canceled) {
				err := fmt.Errorf("failed to index blocks: %w", err)
				return &appError{err, "", http.StatusInternalServerError}
			}
			return &appError{nil, "", http.StatusInternalServerError}
		}
		fileID = id
		size = fileSize
	} else {
		// Single-request upload: read directly from the multipart header.
		handler := fsm.fileHeaders[0]
		id, fileSize, err := indexBlocks(r.Context(), repo.StoreID, repo.Version, "", handler, cryptKey)
		if err != nil {
			if !errors.Is(err, context.Canceled) {
				err := fmt.Errorf("failed to index blocks: %w", err)
				return &appError{err, "", http.StatusInternalServerError}
			}
			return &appError{nil, "", http.StatusInternalServerError}
		}
		fileID = id
		size = fileSize
	}
	// If the content is unchanged, skip committing and just return the ID.
	fullPath := filepath.Join(parentDir, fileName)
	oldFileID, _, _ := fsmgr.GetObjIDByPath(repo.StoreID, headCommit.RootID, fullPath)
	if fileID == oldFileID {
		if isAjax {
			retJSON, err := formatUpdateJSONRet(fileName, fileID, size)
			if err != nil {
				err := fmt.Errorf("failed to format json data")
				return &appError{err, "", http.StatusInternalServerError}
			}
			rsp.Write(retJSON)
		} else {
			rsp.Write([]byte(fileID))
		}
		return nil
	}
	mtime := time.Now().Unix()
	if lastModify > 0 {
		mtime = lastModify
	}
	mode := (syscall.S_IFREG | 0644)
	newDent := fsmgr.NewDirent(fileID, fileName, uint32(mode), mtime, user, size)
	var names []string
	rootID, err := doPostMultiFiles(repo, headCommit.RootID, canonPath, []*fsmgr.SeafDirent{newDent}, user, true, &names)
	if err != nil {
		err := fmt.Errorf("failed to put file %s to %s in repo %s: %v", fileName, canonPath, repo.ID, err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	desc := fmt.Sprintf("Modified \"%s\"", fileName)
	_, err = genNewCommit(repo, headCommit, rootID, user, desc, true, gcID, true)
	if err != nil {
		if errors.Is(err, ErrGCConflict) {
			return &appError{nil, "GC Conflict.\n", http.StatusConflict}
		} else {
			err := fmt.Errorf("failed to generate new commit: %v", err)
			return &appError{err, "", http.StatusInternalServerError}
		}
	}
	if isAjax {
		retJSON, err := formatUpdateJSONRet(fileName, fileID, size)
		if err != nil {
			err := fmt.Errorf("failed to format json data")
			return &appError{err, "", http.StatusInternalServerError}
		}
		rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
		rsp.Write(retJSON)
	} else {
		rsp.Write([]byte(fileID))
	}
	// Propagate the change into virtual repos asynchronously.
	go mergeVirtualRepoPool.AddTask(repo.ID)
	return nil
}
// formatUpdateJSONRet renders the update result as a one-element JSON array
// of {"name": ..., "id": ..., "size": ...}, matching the format expected by
// the Ajax update client.
func formatUpdateJSONRet(fileName, fileID string, size int64) ([]byte, error) {
	obj := map[string]interface{}{
		"name": fileName,
		"id":   fileID,
		"size": size,
	}
	jsonstr, err := json.Marshal([]map[string]interface{}{obj})
	if err != nil {
		// Keep the underlying marshal error instead of discarding it.
		err := fmt.Errorf("failed to convert array to json: %v", err)
		return nil, err
	}
	return jsonstr, nil
}
// checkFileExists reports whether a dirent named fileName exists in the
// directory parentDir of the tree rooted at rootID.
func checkFileExists(storeID, rootID, parentDir, fileName string) (bool, error) {
	dir, err := fsmgr.GetSeafdirByPath(storeID, rootID, parentDir)
	if err != nil {
		return false, fmt.Errorf("parent_dir %s doesn't exist in repo %s: %v", parentDir, storeID, err)
	}
	for _, de := range dir.Entries {
		if de.Name == fileName {
			return true, nil
		}
	}
	return false, nil
}
// uploadBlksAPICB is the HTTP handler for committing a file from
// pre-uploaded blocks; errors are also written to the response as JSON.
func uploadBlksAPICB(rsp http.ResponseWriter, r *http.Request) *appError {
	fsm, appErr := parseUploadHeaders(r)
	if appErr != nil {
		formatJSONError(rsp, appErr)
		return appErr
	}
	appErr = doUploadBlks(rsp, r, fsm)
	if appErr != nil {
		formatJSONError(rsp, appErr)
	}
	return appErr
}
// doUploadBlks commits a file whose blocks were uploaded separately: the
// client sends the block ID list (the blocks must already be in the block
// store) plus file name/size, and this creates the seafile object and a
// new commit under parent_dir.
func doUploadBlks(rsp http.ResponseWriter, r *http.Request, fsm *recvData) *appError {
	if err := r.ParseMultipartForm(1 << 20); err != nil {
		return &appError{nil, "", http.StatusBadRequest}
	}
	defer r.MultipartForm.RemoveAll()
	repoID := fsm.repoID
	user := fsm.user
	// "replace" must be "0" or "1"; 1 overwrites an existing file.
	replaceStr := r.FormValue("replace")
	var replaceExisted bool
	if replaceStr != "" {
		replace, err := strconv.ParseInt(replaceStr, 10, 64)
		if err != nil || (replace != 0 && replace != 1) {
			msg := "Invalid argument replace.\n"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		if replace == 1 {
			replaceExisted = true
		}
	}
	parentDir := normalizeUTF8Path(r.FormValue("parent_dir"))
	if parentDir == "" {
		msg := "No parent_dir given.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	// Optional client-supplied mtime (RFC3339); silently ignored if invalid.
	lastModifyStr := normalizeUTF8Path(r.FormValue("last_modify"))
	var lastModify int64
	if lastModifyStr != "" {
		t, err := time.Parse(time.RFC3339, lastModifyStr)
		if err == nil {
			lastModify = t.Unix()
		}
	}
	fileName := normalizeUTF8Path(r.FormValue("file_name"))
	if fileName == "" {
		msg := "No file_name given.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	fileSizeStr := r.FormValue("file_size")
	var fileSize int64 = -1
	if fileSizeStr != "" {
		size, err := strconv.ParseInt(fileSizeStr, 10, 64)
		if err != nil {
			msg := "Invalid argument file_size.\n"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		fileSize = size
	}
	if fileSize < 0 {
		msg := "Invalid file size.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	// Only commit-only mode is supported: the blocks must already be stored.
	commitOnlyStr, ok := r.Form["commitonly"]
	if !ok || len(commitOnlyStr) == 0 {
		msg := "Only commit supported.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	if err := checkParentDir(repoID, parentDir); err != nil {
		return err
	}
	blockIDsJSON := r.FormValue("blockids")
	if blockIDsJSON == "" {
		msg := "No blockids given.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	fileID, appErr := commitFileBlocks(repoID, parentDir, fileName, blockIDsJSON, user, fileSize, replaceExisted, lastModify)
	if appErr != nil {
		return appErr
	}
	// Respond with {"id": fileID} when ret-json is requested, otherwise a
	// bare JSON string containing the file ID.
	_, ok = r.Form["ret-json"]
	if ok {
		obj := make(map[string]interface{})
		obj["id"] = fileID
		jsonstr, err := json.Marshal(obj)
		if err != nil {
			err := fmt.Errorf("failed to convert array to json: %v", err)
			return &appError{err, "", http.StatusInternalServerError}
		}
		rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
		rsp.Write([]byte(jsonstr))
	} else {
		rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
		rsp.Write([]byte("\""))
		rsp.Write([]byte(fileID))
		rsp.Write([]byte("\""))
	}
	return nil
}
// commitFileBlocks creates a file from pre-uploaded blocks: it validates the
// name and path, decodes the block ID list, checks quota and block
// existence, writes the seafile object, and commits parentDir/fileName.
// It returns the new file ID.
func commitFileBlocks(repoID, parentDir, fileName, blockIDsJSON, user string, fileSize int64, replace bool, lastModify int64) (string, *appError) {
	repo := repomgr.Get(repoID)
	if repo == nil {
		msg := "Failed to get repo.\n"
		err := fmt.Errorf("Failed to get repo %s", repoID)
		return "", &appError{err, msg, http.StatusInternalServerError}
	}
	headCommit, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
	if err != nil {
		msg := "Failed to get head commit.\n"
		err := fmt.Errorf("failed to get head commit for repo %s", repo.ID)
		return "", &appError{err, msg, http.StatusInternalServerError}
	}
	canonPath := getCanonPath(parentDir)
	if shouldIgnoreFile(fileName) {
		msg := fmt.Sprintf("invalid fileName: %s.\n", fileName)
		return "", &appError{nil, msg, http.StatusBadRequest}
	}
	if strings.Contains(parentDir, "//") {
		msg := "parent_dir contains // sequence.\n"
		return "", &appError{nil, msg, http.StatusBadRequest}
	}
	var blkIDs []string
	err = json.Unmarshal([]byte(blockIDsJSON), &blkIDs)
	if err != nil {
		err := fmt.Errorf("failed to decode data to json: %v", err)
		return "", &appError{err, "", http.StatusInternalServerError}
	}
	// Quota is checked against the real stored size of the blocks.
	appErr := checkQuotaBeforeCommitBlocks(repo.StoreID, blkIDs)
	if appErr != nil {
		return "", appErr
	}
	// Remember the GC id so genNewCommit can detect a concurrent GC run.
	gcID, err := repomgr.GetCurrentGCID(repo.StoreID)
	if err != nil {
		err := fmt.Errorf("failed to get current gc id: %v", err)
		return "", &appError{err, "", http.StatusInternalServerError}
	}
	fileID, appErr := indexExistedFileBlocks(repoID, repo.Version, blkIDs, fileSize)
	if appErr != nil {
		return "", appErr
	}
	mtime := time.Now().Unix()
	if lastModify > 0 {
		mtime = lastModify
	}
	mode := (syscall.S_IFREG | 0644)
	newDent := fsmgr.NewDirent(fileID, fileName, uint32(mode), mtime, user, fileSize)
	var names []string
	rootID, err := doPostMultiFiles(repo, headCommit.RootID, canonPath, []*fsmgr.SeafDirent{newDent}, user, replace, &names)
	if err != nil {
		err := fmt.Errorf("failed to post file %s to %s in repo %s: %v", fileName, canonPath, repo.ID, err)
		return "", &appError{err, "", http.StatusInternalServerError}
	}
	desc := fmt.Sprintf("Added \"%s\"", fileName)
	_, err = genNewCommit(repo, headCommit, rootID, user, desc, true, gcID, true)
	if err != nil {
		if errors.Is(err, ErrGCConflict) {
			return "", &appError{nil, "GC Conflict.\n", http.StatusConflict}
		} else {
			err := fmt.Errorf("failed to generate new commit: %v", err)
			return "", &appError{err, "", http.StatusInternalServerError}
		}
	}
	return fileID, nil
}
// checkQuotaBeforeCommitBlocks sums the stored sizes of the given blocks and
// verifies the repo's quota can absorb them.
func checkQuotaBeforeCommitBlocks(storeID string, blockIDs []string) *appError {
	var totalSize int64
	for _, blkID := range blockIDs {
		size, statErr := blockmgr.Stat(storeID, blkID)
		if statErr != nil {
			err := fmt.Errorf("failed to stat block %s in store %s: %v", blkID, storeID, statErr)
			return &appError{err, "", http.StatusInternalServerError}
		}
		totalSize += size
	}
	ret, quotaErr := checkQuota(storeID, totalSize)
	if quotaErr != nil {
		err := fmt.Errorf("failed to check quota: %v", quotaErr)
		return &appError{err, "Internal error.\n", http.StatusInternalServerError}
	}
	if ret == 1 {
		return &appError{nil, "Out of quota.\n", seafHTTPResNoQuota}
	}
	return nil
}
// indexExistedFileBlocks builds a seafile object from blocks that must
// already exist in the block store. A missing block yields
// seafHTTPResBlockMissing; an empty block list yields the empty-file ID.
func indexExistedFileBlocks(repoID string, version int, blkIDs []string, fileSize int64) (string, *appError) {
	if len(blkIDs) == 0 {
		return fsmgr.EmptySha1, nil
	}
	for _, blkID := range blkIDs {
		if blockmgr.Exists(repoID, blkID) {
			continue
		}
		return "", &appError{fmt.Errorf("failed to check block: %s", blkID), "", seafHTTPResBlockMissing}
	}
	fileID, err := writeSeafile(repoID, version, fileSize, blkIDs)
	if err != nil {
		return "", &appError{fmt.Errorf("failed to write seafile: %v", err), "", http.StatusInternalServerError}
	}
	return fileID, nil
}
// uploadRawBlksAPICB is the HTTP handler for uploading raw blocks; errors
// are also written to the response as JSON.
func uploadRawBlksAPICB(rsp http.ResponseWriter, r *http.Request) *appError {
	fsm, appErr := parseUploadHeaders(r)
	if appErr != nil {
		formatJSONError(rsp, appErr)
		return appErr
	}
	appErr = doUploadRawBlks(rsp, r, fsm)
	if appErr != nil {
		formatJSONError(rsp, appErr)
	}
	return appErr
}
// doUploadRawBlks stores raw blocks uploaded as multipart form files. Each
// form file's base name is taken as the block ID its content must hash to.
func doUploadRawBlks(rsp http.ResponseWriter, r *http.Request, fsm *recvData) *appError {
	if err := r.ParseMultipartForm(1 << 20); err != nil {
		return &appError{nil, "", http.StatusBadRequest}
	}
	defer r.MultipartForm.RemoveAll()
	repoID := fsm.repoID
	user := fsm.user
	formFiles := r.MultipartForm.File
	fileHeaders, ok := formFiles["file"]
	if !ok {
		msg := "No file in multipart form.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	// Collect the claimed block IDs (file names) and their headers in
	// parallel slices; postBlocks pairs them up by index.
	for _, handler := range fileHeaders {
		fileName := filepath.Base(handler.Filename)
		fsm.fileNames = append(fsm.fileNames, fileName)
		fsm.fileHeaders = append(fsm.fileHeaders, handler)
	}
	if fsm.fileNames == nil {
		msg := "No file.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	if err := checkTmpFileList(fsm); err != nil {
		return err
	}
	if err := postBlocks(repoID, user, fsm); err != nil {
		return err
	}
	// Content-Length is only used for traffic statistics; 0 when absent.
	var contentLen int64
	lenstr := r.Header.Get("Content-Length")
	if lenstr != "" {
		conLen, err := strconv.ParseInt(lenstr, 10, 64)
		if err != nil {
			msg := "Internal error.\n"
			err := fmt.Errorf("failed to parse content len: %v", err)
			return &appError{err, msg, http.StatusInternalServerError}
		}
		contentLen = conLen
	}
	oper := "web-file-upload"
	sendStatisticMsg(repoID, user, oper, uint64(contentLen))
	rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
	rsp.Write([]byte("\"OK\""))
	return nil
}
// postBlocks verifies and stores the raw blocks collected in fsm (fileNames
// holds the claimed block IDs, fileHeaders the corresponding content), then
// schedules an async repo-size update.
func postBlocks(repoID, user string, fsm *recvData) *appError {
	blockIDs := fsm.fileNames
	fileHeaders := fsm.fileHeaders
	repo := repomgr.Get(repoID)
	if repo == nil {
		msg := "Failed to get repo.\n"
		err := fmt.Errorf("Failed to get repo %s", repoID)
		return &appError{err, msg, http.StatusInternalServerError}
	}
	if err := indexRawBlocks(repo.StoreID, blockIDs, fileHeaders); err != nil {
		// Propagate the underlying cause; the previous message dropped it,
		// hiding which block failed and why.
		err := fmt.Errorf("failed to index file blocks: %v", err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	go updateSizePool.AddTask(repo.ID)
	return nil
}
// indexRawBlocks reads each uploaded block, checks that its SHA-1 matches
// the claimed block ID at the same index, and writes it to the block store.
// blockIDs and fileHeaders are parallel slices built by the caller.
func indexRawBlocks(repoID string, blockIDs []string, fileHeaders []*multipart.FileHeader) error {
	for i, handler := range fileHeaders {
		var buf bytes.Buffer
		f, err := handler.Open()
		if err != nil {
			err := fmt.Errorf("failed to open file for read: %v", err)
			return err
		}
		_, err = buf.ReadFrom(f)
		// Close immediately instead of deferring inside the loop; the
		// previous code never closed the handle at all, leaking one file
		// descriptor (or temp-file handle) per uploaded block.
		f.Close()
		if err != nil {
			err := fmt.Errorf("failed to read block: %v", err)
			return err
		}
		checkSum := sha1.Sum(buf.Bytes())
		blkID := hex.EncodeToString(checkSum[:])
		if blkID != blockIDs[i] {
			err := fmt.Errorf("block id %s:%s doesn't match content", blkID, blockIDs[i])
			return err
		}
		err = blockmgr.Write(repoID, blkID, &buf)
		if err != nil {
			err := fmt.Errorf("failed to write block: %s/%s: %v", repoID, blkID, err)
			return err
		}
	}
	return nil
}
/*
func uploadLinkCB(rsp http.ResponseWriter, r *http.Request) *appError {
if seahubPK == "" {
err := fmt.Errorf("no seahub private key is configured")
return &appError{err, "", http.StatusNotFound}
}
if r.Method == "OPTIONS" {
setAccessControl(rsp)
rsp.WriteHeader(http.StatusOK)
return nil
}
fsm, err := parseUploadLinkHeaders(r)
if err != nil {
return err
}
if err := doUpload(rsp, r, fsm, false); err != nil {
formatJSONError(rsp, err)
return err
}
return nil
}
func parseUploadLinkHeaders(r *http.Request) (*recvData, *appError) {
tokenLen := 36
parts := strings.Split(r.URL.Path[1:], "/")
if len(parts) < 2 {
msg := "Invalid URL"
return nil, &appError{nil, msg, http.StatusBadRequest}
}
if len(parts[1]) < tokenLen {
msg := "Invalid URL"
return nil, &appError{nil, msg, http.StatusBadRequest}
}
token := parts[1][:tokenLen]
info, appErr := queryShareLinkInfo(token, "upload")
if appErr != nil {
return nil, appErr
}
repoID := info.RepoID
parentDir := normalizeUTF8Path(info.ParentDir)
status, err := repomgr.GetRepoStatus(repoID)
if err != nil {
return nil, &appError{err, "", http.StatusInternalServerError}
}
if status != repomgr.RepoStatusNormal && status != -1 {
msg := "Repo status not writable."
return nil, &appError{nil, msg, http.StatusBadRequest}
}
user, _ := repomgr.GetRepoOwner(repoID)
fsm := new(recvData)
fsm.parentDir = parentDir
fsm.tokenType = "upload-link"
fsm.repoID = repoID
fsm.user = user
fsm.rstart = -1
fsm.rend = -1
fsm.fsize = -1
ranges := r.Header.Get("Content-Range")
if ranges != "" {
parseContentRange(ranges, fsm)
}
return fsm, nil
}
*/
// ShareLinkInfo is the response body of seahub's check-share-link-access
// endpoint, describing the repo and path a share/upload link points at.
type ShareLinkInfo struct {
	RepoID    string `json:"repo_id"`
	FilePath  string `json:"file_path"`  // target file; required by accessLinkCB for file links
	ParentDir string `json:"parent_dir"` // presumably the target dir for upload links — confirm against seahub
	ShareType string `json:"share_type"` // "f" denotes a file link (checked in accessLinkCB)
}
// queryShareLinkInfo asks seahub to validate a share/upload link token (for
// operation opType) and returns the decoded link info. The caller's cookie,
// client IP and user agent are forwarded so seahub can enforce its own
// access rules.
func queryShareLinkInfo(token, cookie, opType, ipAddr, userAgent string) (*ShareLinkInfo, *appError) {
	tokenString, err := utils.GenSeahubJWTToken()
	if err != nil {
		signErr := fmt.Errorf("failed to sign jwt token: %v", err)
		return nil, &appError{signErr, "", http.StatusInternalServerError}
	}
	url := fmt.Sprintf("%s?type=%s", option.SeahubURL+"/check-share-link-access/", opType)
	header := map[string][]string{
		"Authorization": {"Token " + tokenString},
	}
	if cookie != "" {
		header["Cookie"] = []string{cookie}
	}
	req := map[string]string{"token": token}
	if ipAddr != "" {
		req["ip_addr"] = ipAddr
	}
	if userAgent != "" {
		req["user_agent"] = userAgent
	}
	msg, err := json.Marshal(req)
	if err != nil {
		encErr := fmt.Errorf("failed to encode access token: %v", err)
		return nil, &appError{encErr, "", http.StatusInternalServerError}
	}
	status, body, err := utils.HttpCommon("POST", url, header, bytes.NewReader(msg))
	if err != nil {
		// Non-500 statuses carry seahub's own error body through to the client.
		if status != http.StatusInternalServerError {
			return nil, &appError{nil, string(body), status}
		}
		reqErr := fmt.Errorf("failed to get share link info: %v", err)
		return nil, &appError{reqErr, "", http.StatusInternalServerError}
	}
	info := new(ShareLinkInfo)
	if err := json.Unmarshal(body, &info); err != nil {
		decErr := fmt.Errorf("failed to decode share link info: %v", err)
		return nil, &appError{decErr, "", http.StatusInternalServerError}
	}
	return info, nil
}
// accessLinkCB serves "/f/<token>" share-link requests. It resolves the
// token via Seahub (queryShareLinkInfo), validates the link type and the
// target file, honours ETag-based caching, and streams the file — with
// Range support for unencrypted repos and decryption for encrypted ones.
func accessLinkCB(rsp http.ResponseWriter, r *http.Request) *appError {
	if option.JWTPrivateKey == "" {
		err := fmt.Errorf("no seahub private key is configured")
		return &appError{err, "", http.StatusNotFound}
	}
	parts := strings.Split(r.URL.Path[1:], "/")
	if len(parts) < 2 {
		msg := "Invalid URL"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	token := parts[1]
	cookie := r.Header.Get("Cookie")
	ipAddr := getClientIPAddr(r)
	userAgent := r.Header.Get("User-Agent")
	info, appErr := queryShareLinkInfo(token, cookie, "file", ipAddr, userAgent)
	if appErr != nil {
		return appErr
	}
	if info.FilePath == "" {
		msg := "Internal server error\n"
		err := fmt.Errorf("failed to get file_path by token %s", token)
		return &appError{err, msg, http.StatusInternalServerError}
	}
	// Only file-type ("f") share links may be served by this handler.
	if info.ShareType != "f" {
		msg := "Link type mismatch"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	repoID := info.RepoID
	filePath := normalizeUTF8Path(info.FilePath)
	fileName := filepath.Base(filePath)
	// Anything other than an explicit "view" is treated as a download.
	op := r.URL.Query().Get("op")
	if op != "view" {
		op = "download-link"
	}
	ranges := r.Header["Range"]
	byteRanges := strings.Join(ranges, "")
	repo := repomgr.Get(repoID)
	if repo == nil {
		msg := "Bad repo id\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	user, _ := repomgr.GetRepoOwner(repoID)
	fileID, _, err := fsmgr.GetObjIDByPath(repo.StoreID, repo.RootID, filePath)
	if err != nil {
		msg := "Invalid file_path\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	// Check for file changes by comparing the ETag in the If-None-Match header with the file ID. Set no-cache to allow clients to validate file changes before using the cache.
	etag := r.Header.Get("If-None-Match")
	if etag == fileID {
		return &appError{nil, "", http.StatusNotModified}
	}
	rsp.Header().Set("ETag", fileID)
	rsp.Header().Set("Cache-Control", "public, no-cache")
	var cryptKey *seafileCrypt
	if repo.IsEncrypted {
		key, err := parseCryptKey(rsp, repoID, user, repo.EncVersion)
		if err != nil {
			return err
		}
		cryptKey = key
	}
	exists, _ := fsmgr.Exists(repo.StoreID, fileID)
	if !exists {
		msg := "Invalid file id"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	// Range requests are only honoured for unencrypted repos; encrypted
	// content is streamed in full so it can be decrypted server-side.
	if !repo.IsEncrypted && len(byteRanges) != 0 {
		if err := doFileRange(rsp, r, repo, fileID, fileName, op, byteRanges, user); err != nil {
			return err
		}
	} else if err := doFile(rsp, r, repo, fileID, fileName, op, cryptKey, user); err != nil {
		return err
	}
	return nil
}
/*
func accessDirLinkCB(rsp http.ResponseWriter, r *http.Request) *appError {
if seahubPK == "" {
err := fmt.Errorf("no seahub private key is configured")
return &appError{err, "", http.StatusNotFound}
}
parts := strings.Split(r.URL.Path[1:], "/")
if len(parts) < 2 {
msg := "Invalid URL"
return &appError{nil, msg, http.StatusBadRequest}
}
token := parts[1]
info, appErr := queryShareLinkInfo(token, "dir")
if appErr != nil {
return appErr
}
repoID := info.RepoID
parentDir := normalizeUTF8Path(info.ParentDir)
op := "download-link"
repo := repomgr.Get(repoID)
if repo == nil {
msg := "Bad repo id\n"
return &appError{nil, msg, http.StatusBadRequest}
}
user, _ := repomgr.GetRepoOwner(repoID)
filePath := r.URL.Query().Get("p")
if filePath == "" {
err := r.ParseForm()
if err != nil {
msg := "Invalid form\n"
return &appError{nil, msg, http.StatusBadRequest}
}
parentDir := r.FormValue("parent_dir")
if parentDir == "" {
msg := "Invalid parent_dir\n"
return &appError{nil, msg, http.StatusBadRequest}
}
parentDir = normalizeUTF8Path(parentDir)
parentDir = getCanonPath(parentDir)
dirents := r.FormValue("dirents")
if dirents == "" {
msg := "Invalid dirents\n"
return &appError{nil, msg, http.StatusBadRequest}
}
// opStr:=r.FormVale("op")
list, err := jsonToDirentList(repo, parentDir, dirents)
if err != nil {
log.Warnf("failed to parse dirent list: %v", err)
msg := "Invalid dirents\n"
return &appError{nil, msg, http.StatusBadRequest}
}
if len(list) == 0 {
msg := "Invalid dirents\n"
return &appError{nil, msg, http.StatusBadRequest}
}
obj := make(map[string]interface{})
if len(list) == 1 {
dent := list[0]
op = "download-dir-link"
obj["dir_name"] = dent.Name
obj["obj_id"] = dent.ID
} else {
op = "download-multi-link"
obj["parent_dir"] = parentDir
var fileList []string
for _, dent := range list {
fileList = append(fileList, dent.Name)
}
obj["file_list"] = fileList
}
data, err := json.Marshal(obj)
if err != nil {
err := fmt.Errorf("failed to encode zip obj: %v", err)
return &appError{err, "", http.StatusInternalServerError}
}
if err := downloadZipFile(rsp, r, string(data), repoID, user, op); err != nil {
return err
}
return nil
}
// file path is not empty string
if _, ok := r.Header["If-Modified-Since"]; ok {
return &appError{nil, "", http.StatusNotModified}
}
filePath = normalizeUTF8Path(filePath)
fullPath := filepath.Join(parentDir, filePath)
fileName := filepath.Base(filePath)
fileID, _, err := fsmgr.GetObjIDByPath(repo.StoreID, repo.RootID, fullPath)
if err != nil {
msg := "Invalid file_path\n"
return &appError{nil, msg, http.StatusBadRequest}
}
rsp.Header().Set("ETag", fileID)
now := time.Now()
rsp.Header().Set("Last-Modified", now.Format("Mon, 2 Jan 2006 15:04:05 GMT"))
rsp.Header().Set("Cache-Control", "max-age=3600")
ranges := r.Header["Range"]
byteRanges := strings.Join(ranges, "")
var cryptKey *seafileCrypt
if repo.IsEncrypted {
key, err := parseCryptKey(rsp, repoID, user, repo.EncVersion)
if err != nil {
return err
}
cryptKey = key
}
exists, _ := fsmgr.Exists(repo.StoreID, fileID)
if !exists {
msg := "Invalid file id"
return &appError{nil, msg, http.StatusBadRequest}
}
if !repo.IsEncrypted && len(byteRanges) != 0 {
if err := doFileRange(rsp, r, repo, fileID, fileName, op, byteRanges, user); err != nil {
return err
}
} else if err := doFile(rsp, r, repo, fileID, fileName, op, cryptKey, user); err != nil {
return err
}
return nil
}
func jsonToDirentList(repo *repomgr.Repo, parentDir, dirents string) ([]*fsmgr.SeafDirent, error) {
var list []string
err := json.Unmarshal([]byte(dirents), &list)
if err != nil {
return nil, err
}
dir, err := fsmgr.GetSeafdirByPath(repo.StoreID, repo.RootID, parentDir)
if err != nil {
return nil, err
}
direntHash := make(map[string]*fsmgr.SeafDirent)
for _, dent := range dir.Entries {
direntHash[dent.Name] = dent
}
var direntList []*fsmgr.SeafDirent
for _, path := range list {
normPath := normalizeUTF8Path(path)
if normPath == "" || normPath == "/" {
return nil, fmt.Errorf("Invalid download file name: %s\n", normPath)
}
dent, ok := direntHash[normPath]
if !ok {
return nil, fmt.Errorf("failed to get dient for %s in dir %s in repo %s", normPath, parentDir, repo.StoreID)
}
direntList = append(direntList, dent)
}
return direntList, nil
}
*/
func removeFileopExpireCache() {
deleteBlockMaps := func(key interface{}, value interface{}) bool {
if blkMap, ok := value.(*blockMap); ok {
if blkMap.expireTime <= time.Now().Unix() {
blockMapCacheTable.Delete(key)
}
}
return true
}
blockMapCacheTable.Range(deleteBlockMaps)
}
================================================
FILE: fileserver/fileserver.go
================================================
// Main package for Seafile file server.
package main
import (
"crypto/tls"
"crypto/x509"
"database/sql"
"flag"
"fmt"
"io"
"net/http"
"os"
"os/signal"
"path/filepath"
"runtime/debug"
"strings"
"syscall"
"time"
"github.com/go-sql-driver/mysql"
"github.com/gorilla/mux"
"github.com/haiwen/seafile-server/fileserver/blockmgr"
"github.com/haiwen/seafile-server/fileserver/commitmgr"
"github.com/haiwen/seafile-server/fileserver/fsmgr"
"github.com/haiwen/seafile-server/fileserver/metrics"
"github.com/haiwen/seafile-server/fileserver/option"
"github.com/haiwen/seafile-server/fileserver/repomgr"
"github.com/haiwen/seafile-server/fileserver/searpc"
"github.com/haiwen/seafile-server/fileserver/share"
"github.com/haiwen/seafile-server/fileserver/utils"
log "github.com/sirupsen/logrus"
"net/http/pprof"
)
// Command-line and derived global state for the file server process.
var dataDir, absDataDir string // seafile data directory (-d) and its absolute form
var centralDir string          // central config directory (-F)
var logFile, absLogFile string // log file path (-l) and its absolute form
var rpcPipePath string         // directory containing seafile.sock (-p)
var pidFilePath string         // pid file path (-P)
var logFp *os.File             // open handle on the log file (nil when logging to stdout or "-")
var seafileDB, ccnetDB *sql.DB // database handles opened by loadSeafileDB/loadCcnetDB
var logToStdout bool           // set from SEAFILE_LOG_TO_STDOUT=true
func init() {
flag.StringVar(¢ralDir, "F", "", "central config directory")
flag.StringVar(&dataDir, "d", "", "seafile data directory")
flag.StringVar(&logFile, "l", "", "log file path")
flag.StringVar(&rpcPipePath, "p", "", "rpc pipe path")
flag.StringVar(&pidFilePath, "P", "", "pid file path")
env := os.Getenv("SEAFILE_LOG_TO_STDOUT")
if env == "true" {
logToStdout = true
}
log.SetFormatter(&LogFormatter{})
}
const (
	timestampFormat = "[2006-01-02 15:04:05] "
)

// LogFormatter renders log entries as "[fileserver] [timestamp] [LEVEL] message"
// (the app-name prefix only when logging to stdout).
type LogFormatter struct{}

// Format implements the logrus Formatter interface.
func (f *LogFormatter) Format(entry *log.Entry) ([]byte, error) {
	// Fatal entries are reported as ERROR; everything else is upper-cased.
	levelStr := strings.ToUpper(entry.Level.String())
	if levelStr == "FATAL" {
		levelStr = "ERROR"
	}
	var buf []byte
	if logToStdout {
		buf = append(buf, "[fileserver] "...)
	}
	buf = entry.Time.AppendFormat(buf, timestampFormat)
	buf = append(buf, '[')
	buf = append(buf, levelStr...)
	buf = append(buf, "] "...)
	buf = append(buf, entry.Message...)
	buf = append(buf, '\n')
	return buf, nil
}
// loadCcnetDB opens the ccnet MySQL database using options from the central
// config directory. The DSN's tls parameter is chosen from UseTLS/SkipVerify
// (skip-verify, a custom CA-backed config, or plain true/false). Any failure
// is fatal since the server cannot run without this database.
func loadCcnetDB() {
	dbOpt, err := option.LoadDBOption(centralDir)
	if err != nil {
		log.Fatalf("Failed to load database: %v", err)
	}
	var dsn string
	timeout := "&readTimeout=60s" + "&writeTimeout=60s"
	if dbOpt.UseTLS && dbOpt.SkipVerify {
		dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=skip-verify%s", dbOpt.User, dbOpt.Password, dbOpt.Host, dbOpt.Port, dbOpt.CcnetDbName, timeout)
	} else if dbOpt.UseTLS && !dbOpt.SkipVerify {
		// Verify the server cert against the configured CA ("custom" TLS config).
		registerCA(dbOpt.CaPath)
		dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=custom%s", dbOpt.User, dbOpt.Password, dbOpt.Host, dbOpt.Port, dbOpt.CcnetDbName, timeout)
	} else {
		dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=%t%s", dbOpt.User, dbOpt.Password, dbOpt.Host, dbOpt.Port, dbOpt.CcnetDbName, dbOpt.UseTLS, timeout)
	}
	if dbOpt.Charset != "" {
		dsn = fmt.Sprintf("%s&charset=%s", dsn, dbOpt.Charset)
	}
	// sql.Open only validates the DSN; real connections are made lazily.
	ccnetDB, err = sql.Open("mysql", dsn)
	if err != nil {
		log.Fatalf("Failed to open database: %v", err)
	}
	ccnetDB.SetConnMaxLifetime(5 * time.Minute)
	ccnetDB.SetMaxOpenConns(8)
	ccnetDB.SetMaxIdleConns(8)
}
// registerCA loads the CA certificate at capath and registers it with the
// MySQL driver under the TLS config name "custom", used to verify the
// database server's certificate. Fatal on any failure.
func registerCA(capath string) {
	pemBytes, err := os.ReadFile(capath)
	if err != nil {
		log.Fatal(err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pemBytes) {
		log.Fatal("Failed to append PEM.")
	}
	mysql.RegisterTLSConfig("custom", &tls.Config{
		RootCAs: pool,
	})
}
// loadSeafileDB opens the seafile MySQL database using options from the
// central config directory; identical in structure to loadCcnetDB but for
// the seafile database name. Any failure is fatal.
func loadSeafileDB() {
	dbOpt, err := option.LoadDBOption(centralDir)
	if err != nil {
		log.Fatalf("Failed to load database: %v", err)
	}
	var dsn string
	timeout := "&readTimeout=60s" + "&writeTimeout=60s"
	if dbOpt.UseTLS && dbOpt.SkipVerify {
		dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=skip-verify%s", dbOpt.User, dbOpt.Password, dbOpt.Host, dbOpt.Port, dbOpt.SeafileDbName, timeout)
	} else if dbOpt.UseTLS && !dbOpt.SkipVerify {
		// Verify the server cert against the configured CA ("custom" TLS config).
		registerCA(dbOpt.CaPath)
		dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=custom%s", dbOpt.User, dbOpt.Password, dbOpt.Host, dbOpt.Port, dbOpt.SeafileDbName, timeout)
	} else {
		dsn = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=%t%s", dbOpt.User, dbOpt.Password, dbOpt.Host, dbOpt.Port, dbOpt.SeafileDbName, dbOpt.UseTLS, timeout)
	}
	if dbOpt.Charset != "" {
		dsn = fmt.Sprintf("%s&charset=%s", dsn, dbOpt.Charset)
	}
	seafileDB, err = sql.Open("mysql", dsn)
	if err != nil {
		log.Fatalf("Failed to open database: %v", err)
	}
	seafileDB.SetConnMaxLifetime(5 * time.Minute)
	seafileDB.SetMaxOpenConns(8)
	seafileDB.SetMaxIdleConns(8)
}
func writePidFile(pid_file_path string) error {
file, err := os.OpenFile(pid_file_path, os.O_CREATE|os.O_WRONLY, 0664)
if err != nil {
return err
}
defer file.Close()
pid := os.Getpid()
str := fmt.Sprintf("%d", pid)
_, err = file.Write([]byte(str))
if err != nil {
return err
}
return nil
}
func removePidfile(pid_file_path string) error {
err := os.Remove(pid_file_path)
if err != nil {
return err
}
return nil
}
// main wires up the whole file server: parses flags, validates directories,
// sets up logging, opens both databases, initializes every manager and
// subsystem, then serves HTTP until the listener fails or a signal
// terminates the process.
func main() {
	flag.Parse()
	if centralDir == "" {
		log.Fatal("central config directory must be specified.")
	}
	// Write the pid file as early as possible so the controller can find us.
	if pidFilePath != "" {
		if writePidFile(pidFilePath) != nil {
			log.Fatal("write pid file failed.")
		}
	}
	_, err := os.Stat(centralDir)
	if os.IsNotExist(err) {
		log.Fatalf("central config directory %s doesn't exist: %v.", centralDir, err)
	}
	if dataDir == "" {
		log.Fatal("seafile data directory must be specified.")
	}
	_, err = os.Stat(dataDir)
	if os.IsNotExist(err) {
		log.Fatalf("seafile data directory %s doesn't exist: %v.", dataDir, err)
	}
	absDataDir, err = filepath.Abs(dataDir)
	if err != nil {
		log.Fatalf("Failed to convert seafile data dir to absolute path: %v.", err)
	}
	// Log destination precedence: stdout env var > default file in data dir
	// > explicit -l path > "-" meaning stdout.
	if logToStdout {
		// Use default output (StdOut)
	} else if logFile == "" {
		absLogFile = filepath.Join(absDataDir, "fileserver.log")
		fp, err := os.OpenFile(absLogFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
		if err != nil {
			log.Fatalf("Failed to open or create log file: %v", err)
		}
		logFp = fp
		log.SetOutput(fp)
	} else if logFile != "-" {
		absLogFile, err = filepath.Abs(logFile)
		if err != nil {
			log.Fatalf("Failed to convert log file path to absolute path: %v", err)
		}
		fp, err := os.OpenFile(absLogFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
		if err != nil {
			log.Fatalf("Failed to open or create log file: %v", err)
		}
		logFp = fp
		log.SetOutput(fp)
	}
	// Redirect stderr into the log file so runtime panics are captured too.
	if absLogFile != "" && !logToStdout {
		utils.Dup(int(logFp.Fd()), int(os.Stderr.Fd()))
	}
	// When logFile is "-", use default output (StdOut)
	if err := option.LoadSeahubConfig(); err != nil {
		log.Fatalf("Failed to read seahub config: %v", err)
	}
	option.LoadFileServerOptions(centralDir)
	loadCcnetDB()
	loadSeafileDB()
	level, err := log.ParseLevel(option.LogLevel)
	if err != nil {
		log.Info("use the default log level: info")
		log.SetLevel(log.InfoLevel)
	} else {
		log.SetLevel(level)
	}
	// Initialize storage managers and request-handling subsystems.
	repomgr.Init(seafileDB)
	fsmgr.Init(centralDir, dataDir, option.FsCacheLimit)
	blockmgr.Init(centralDir, dataDir)
	commitmgr.Init(centralDir, dataDir)
	share.Init(ccnetDB, seafileDB, option.GroupTableName, option.CloudMode)
	rpcClientInit()
	fileopInit()
	syncAPIInit()
	sizeSchedulerInit()
	virtualRepoInit()
	initUpload()
	metrics.Init()
	router := newHTTPRouter()
	// Signal handling: INT/TERM shut down cleanly, USR1 rotates the log.
	go handleSignals()
	go handleUser1Signal()
	log.Print("Seafile file server started.")
	server := new(http.Server)
	server.Addr = fmt.Sprintf("%s:%d", option.Host, option.Port)
	server.Handler = router
	err = server.ListenAndServe()
	if err != nil {
		log.Errorf("File server exiting: %v", err)
	}
}
// handleSignals blocks until SIGINT/SIGTERM is received, then flushes
// metrics, removes the pid file, and exits the process.
func handleSignals() {
	signalChan := make(chan os.Signal, 1)
	signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM, os.Interrupt)
	<-signalChan
	metrics.Stop()
	removePidfile(pidFilePath)
	os.Exit(0)
}
// handleUser1Signal rotates the log file each time SIGUSR1 is delivered;
// it never returns.
func handleUser1Signal() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGUSR1)
	for range ch {
		logRotate()
	}
}
// logRotate reopens the fileserver log in response to SIGUSR1 so external
// log-rotation tools can move the old file aside.
//
// Fixes two defects in the original: (1) when logFp was nil (logFile "-",
// where absLogFile is also empty), logFp.Fd() dereferenced a nil pointer;
// (2) when logFp was nil the freshly opened handle was never stored, so
// every later rotation leaked a file descriptor.
func logRotate() {
	if logToStdout || absLogFile == "" {
		// Logging goes to stdout; there is no file to rotate.
		return
	}
	// reopen fileserver log
	fp, err := os.OpenFile(absLogFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
	if err != nil {
		log.Fatalf("Failed to reopen fileserver log: %v", err)
	}
	log.SetOutput(fp)
	if logFp != nil {
		logFp.Close()
	}
	// Always track the new handle so the next rotation closes it.
	logFp = fp
	utils.Dup(int(logFp.Fd()), int(os.Stderr.Fd()))
}
// rpcclient talks to seaf-server's threaded RPC service over a unix socket.
var rpcclient *searpc.Client

// rpcClientInit connects the searpc client to seafile.sock, located in
// rpcPipePath if given (-p), otherwise in the seafile data directory.
func rpcClientInit() {
	var pipePath string
	if rpcPipePath != "" {
		pipePath = filepath.Join(rpcPipePath, "seafile.sock")
	} else {
		pipePath = filepath.Join(absDataDir, "seafile.sock")
	}
	rpcclient = searpc.Init(pipePath, "seafserv-threaded-rpcserver", 10)
}
// newHTTPRouter builds the gorilla/mux routing table for every fileserver
// endpoint: file/zip/upload access, share links, the file-syncing protocol,
// seadrive helpers and password-protected pprof profiling. Repo IDs are
// matched as UUIDs and commit/block IDs as 40-char hex directly in the
// route patterns; {slash:\/?} makes a trailing slash optional.
func newHTTPRouter() *mux.Router {
	r := mux.NewRouter()
	r.HandleFunc("/protocol-version{slash:\\/?}", handleProtocolVersion)
	r.Handle("/files/{.*}/{.*}", appHandler(accessCB))
	r.Handle("/blks/{.*}/{.*}", appHandler(accessBlksCB))
	r.Handle("/zip/{.*}", appHandler(accessZipCB))
	r.Handle("/upload-api/{.*}", appHandler(uploadAPICB))
	r.Handle("/upload-aj/{.*}", appHandler(uploadAjaxCB))
	r.Handle("/update-api/{.*}", appHandler(updateAPICB))
	r.Handle("/update-aj/{.*}", appHandler(updateAjaxCB))
	r.Handle("/upload-blks-api/{.*}", appHandler(uploadBlksAPICB))
	r.Handle("/upload-raw-blks-api/{.*}", appHandler(uploadRawBlksAPICB))
	// links api
	//r.Handle("/u/{.*}", appHandler(uploadLinkCB))
	r.Handle("/f/{.*}{slash:\\/?}", appHandler(accessLinkCB))
	//r.Handle("/d/{.*}", appHandler(accessDirLinkCB))
	r.Handle("/repos/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/files/{filepath:.*}", appHandler(accessV2CB))
	// file syncing api
	r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/permission-check{slash:\\/?}",
		appHandler(permissionCheckCB))
	r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/commit/HEAD{slash:\\/?}",
		appHandler(headCommitOperCB))
	r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/commit/{id:[\\da-z]{40}}",
		appHandler(commitOperCB))
	r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/block/{id:[\\da-z]{40}}",
		appHandler(blockOperCB))
	r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/fs-id-list{slash:\\/?}",
		appHandler(getFsObjIDCB))
	r.Handle("/repo/head-commits-multi{slash:\\/?}",
		appHandler(headCommitsMultiCB))
	r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/pack-fs{slash:\\/?}",
		appHandler(packFSCB))
	r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/check-fs{slash:\\/?}",
		appHandler(checkFSCB))
	r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/check-blocks{slash:\\/?}",
		appHandler(checkBlockCB))
	r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/recv-fs{slash:\\/?}",
		appHandler(recvFSCB))
	r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/quota-check{slash:\\/?}",
		appHandler(getCheckQuotaCB))
	r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/jwt-token{slash:\\/?}",
		appHandler(getJWTTokenCB))
	// seadrive api
	r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/block-map/{id:[\\da-z]{40}}",
		appHandler(getBlockMapCB))
	r.Handle("/accessible-repos{slash:\\/?}", appHandler(getAccessibleRepoListCB))
	// pprof
	r.Handle("/debug/pprof", &profileHandler{http.HandlerFunc(pprof.Index)})
	r.Handle("/debug/pprof/cmdline", &profileHandler{http.HandlerFunc(pprof.Cmdline)})
	r.Handle("/debug/pprof/profile", &profileHandler{http.HandlerFunc(pprof.Profile)})
	r.Handle("/debug/pprof/symbol", &profileHandler{http.HandlerFunc(pprof.Symbol)})
	r.Handle("/debug/pprof/heap", &profileHandler{pprof.Handler("heap")})
	r.Handle("/debug/pprof/block", &profileHandler{pprof.Handler("block")})
	r.Handle("/debug/pprof/goroutine", &profileHandler{pprof.Handler("goroutine")})
	r.Handle("/debug/pprof/threadcreate", &profileHandler{pprof.Handler("threadcreate")})
	r.Handle("/debug/pprof/trace", &traceHandler{})
	// Request metrics are only collected when Redis is configured.
	if option.HasRedisOptions {
		r.Use(metrics.MetricMiddleware)
	}
	return r
}
func handleProtocolVersion(rsp http.ResponseWriter, r *http.Request) {
io.WriteString(rsp, "{\"version\": 2}")
}
// appError is the error type returned by request handlers: an optional
// underlying error (logged when Code is 500), a client-visible message,
// and the HTTP status code to send.
type appError struct {
	Error   error
	Message string
	Code    int
}
// appHandler adapts a handler returning *appError into an http.Handler.
type appHandler func(http.ResponseWriter, *http.Request) *appError

// ServeHTTP runs the wrapped handler; on failure it logs internal server
// errors and writes the error message with the recorded status code.
func (fn appHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	e := fn(w, r)
	if e == nil {
		return
	}
	if e.Code == http.StatusInternalServerError && e.Error != nil {
		log.Errorf("path %s internal server error: %v\n", r.URL.Path, e.Error)
	}
	http.Error(w, e.Message, e.Code)
}
// RecoverWrapper runs f, converting any panic into an error log with a
// stack trace instead of crashing the process.
func RecoverWrapper(f func()) {
	defer func() {
		r := recover()
		if r != nil {
			log.Errorf("panic: %v\n%s", r, debug.Stack())
		}
	}()
	f()
}
// profileHandler gates a pprof handler behind the configured profiling
// password (?password=...).
type profileHandler struct {
	pHandler http.Handler
}

// ServeHTTP delegates to the wrapped pprof handler only when profiling is
// enabled and the password matches; otherwise it replies 401.
func (p *profileHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	pw := r.URL.Query().Get("password")
	if option.EnableProfiling && pw == option.ProfilePassword {
		p.pHandler.ServeHTTP(w, r)
		return
	}
	http.Error(w, "", http.StatusUnauthorized)
}
// traceHandler serves pprof execution traces behind the same password gate
// as profileHandler.
type traceHandler struct {
}

// ServeHTTP starts a pprof trace only when profiling is enabled and the
// password matches; otherwise it replies 401.
func (p *traceHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	pw := r.URL.Query().Get("password")
	if option.EnableProfiling && pw == option.ProfilePassword {
		pprof.Trace(w, r)
		return
	}
	http.Error(w, "", http.StatusUnauthorized)
}
================================================
FILE: fileserver/fsmgr/fsmgr.go
================================================
// Package fsmgr manages fs objects
package fsmgr
import (
"bytes"
"compress/zlib"
"crypto/sha1"
"encoding/hex"
"fmt"
"io"
"path/filepath"
"strings"
"sync"
"syscall"
"time"
"unsafe"
"github.com/haiwen/seafile-server/fileserver/objstore"
"github.com/haiwen/seafile-server/fileserver/utils"
jsoniter "github.com/json-iterator/go"
"github.com/dgraph-io/ristretto"
)
var json = jsoniter.ConfigCompatibleWithStandardLibrary
// Seafile is a file object: an ordered list of block IDs plus metadata.
// FileID is the SHA-1 of the canonical JSON encoding (see toJSON) and is
// excluded from the JSON itself.
type Seafile struct {
	data     []byte   // cached canonical JSON encoding, input to the SHA-1 id
	Version  int      `json:"version"`
	FileType int      `json:"type"`
	FileID   string   `json:"-"`
	FileSize uint64   `json:"size"`
	BlkIDs   []string `json:"block_ids"`
}
// In the JSON encoding generated by C language, there are spaces after the ',' and ':', and the order of the fields is sorted by the key.
// So it is not compatible with the json library generated by go.
// toJSON therefore emits the object by hand with fields in key order
// (block_ids, size, type, version) and "key: value" / ", " spacing, so that
// sha1(toJSON()) reproduces exactly the object ID the C server computes.
func (file *Seafile) toJSON() ([]byte, error) {
	var buf bytes.Buffer
	buf.WriteByte('{')
	buf.WriteString("\"block_ids\": [")
	for i, blkID := range file.BlkIDs {
		data, err := json.Marshal(blkID)
		if err != nil {
			return nil, err
		}
		buf.Write(data)
		// ", " between elements, none after the last.
		if i < len(file.BlkIDs)-1 {
			buf.WriteByte(',')
			buf.WriteByte(' ')
		}
	}
	buf.WriteByte(']')
	buf.WriteByte(',')
	buf.WriteByte(' ')
	data, err := json.Marshal(file.FileSize)
	if err != nil {
		return nil, err
	}
	writeField(&buf, "\"size\"", data)
	buf.WriteByte(',')
	buf.WriteByte(' ')
	data, err = json.Marshal(SeafMetadataTypeFile)
	if err != nil {
		return nil, err
	}
	writeField(&buf, "\"type\"", data)
	buf.WriteByte(',')
	buf.WriteByte(' ')
	data, err = json.Marshal(file.Version)
	if err != nil {
		return nil, err
	}
	writeField(&buf, "\"version\"", data)
	buf.WriteByte('}')
	return buf.Bytes(), nil
}
func writeField(buf *bytes.Buffer, key string, value []byte) {
buf.WriteString(key)
buf.WriteByte(':')
buf.WriteByte(' ')
buf.Write(value)
}
// SeafDirent is a dir entry object: one named child (file or subdir) of a
// SeafDir. Modifier and Size are only meaningful for regular files (see
// NewDirent and toJSON).
type SeafDirent struct {
	Mode     uint32 `json:"mode"`
	ID       string `json:"id"`
	Name     string `json:"name"`
	Mtime    int64  `json:"mtime"`
	Modifier string `json:"modifier"`
	Size     int64  `json:"size"`
}
// toJSON hand-writes the C-server-compatible JSON encoding of a dirent.
// Field order is id, mode, [modifier], mtime, name, [size]; modifier and
// size are only emitted for regular files. Name and modifier use
// jsonNoEscape so '<', '>' and '&' are not HTML-escaped, keeping the bytes
// identical to the C encoding that the SHA-1 IDs are computed from.
func (dent *SeafDirent) toJSON() ([]byte, error) {
	var buf bytes.Buffer
	buf.WriteByte('{')
	data, err := json.Marshal(dent.ID)
	if err != nil {
		return nil, err
	}
	writeField(&buf, "\"id\"", data)
	buf.WriteByte(',')
	buf.WriteByte(' ')
	data, err = json.Marshal(dent.Mode)
	if err != nil {
		return nil, err
	}
	writeField(&buf, "\"mode\"", data)
	buf.WriteByte(',')
	buf.WriteByte(' ')
	if IsRegular(dent.Mode) {
		data, err = jsonNoEscape(dent.Modifier)
		if err != nil {
			return nil, err
		}
		writeField(&buf, "\"modifier\"", data)
		buf.WriteByte(',')
		buf.WriteByte(' ')
	}
	data, err = json.Marshal(dent.Mtime)
	if err != nil {
		return nil, err
	}
	writeField(&buf, "\"mtime\"", data)
	buf.WriteByte(',')
	buf.WriteByte(' ')
	data, err = jsonNoEscape(dent.Name)
	if err != nil {
		return nil, err
	}
	writeField(&buf, "\"name\"", data)
	if IsRegular(dent.Mode) {
		buf.WriteByte(',')
		buf.WriteByte(' ')
		data, err = json.Marshal(dent.Size)
		if err != nil {
			return nil, err
		}
		writeField(&buf, "\"size\"", data)
	}
	buf.WriteByte('}')
	return buf.Bytes(), nil
}
// In golang json, the string is encoded using HTMLEscape, which replaces "<", ">", "&", U+2028, and U+2029 are escaped to "\u003c","\u003e", "\u0026", "\u2028", and "\u2029".
// So it is not compatible with the json library generated by c. This replacement can be disabled when using an Encoder, by calling SetEscapeHTML(false).
func jsonNoEscape(data interface{}) ([]byte, error) {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	enc.SetEscapeHTML(false)
	err := enc.Encode(data)
	if err != nil {
		return nil, err
	}
	out := buf.Bytes()
	// Encoder.Encode always appends a trailing newline; strip it so the
	// caller gets the bare encoded value.
	return out[:len(out)-1], nil
}
// SeafDir is a dir object: a list of dirents plus metadata. DirID is the
// SHA-1 of the canonical JSON encoding (see toJSON) and is excluded from
// the JSON itself.
type SeafDir struct {
	data    []byte        // cached canonical JSON encoding, input to the SHA-1 id
	Version int           `json:"version"`
	DirType int           `json:"type"`
	DirID   string        `json:"-"`
	Entries []*SeafDirent `json:"dirents"`
}
// toJSON hand-writes the C-server-compatible JSON encoding of a dir:
// fields in key order (dirents, type, version) with "key: value" and ", "
// spacing, so sha1(toJSON()) matches the object ID the C server computes.
func (dir *SeafDir) toJSON() ([]byte, error) {
	var buf bytes.Buffer
	buf.WriteByte('{')
	buf.WriteString("\"dirents\": [")
	for i, entry := range dir.Entries {
		data, err := entry.toJSON()
		if err != nil {
			return nil, err
		}
		buf.Write(data)
		// ", " between entries, none after the last.
		if i < len(dir.Entries)-1 {
			buf.WriteByte(',')
			buf.WriteByte(' ')
		}
	}
	buf.WriteByte(']')
	buf.WriteByte(',')
	buf.WriteByte(' ')
	data, err := json.Marshal(SeafMetadataTypeDir)
	if err != nil {
		return nil, err
	}
	writeField(&buf, "\"type\"", data)
	buf.WriteByte(',')
	buf.WriteByte(' ')
	data, err = json.Marshal(dir.Version)
	if err != nil {
		return nil, err
	}
	writeField(&buf, "\"version\"", data)
	buf.WriteByte('}')
	return buf.Bytes(), nil
}
// FileCountInfo contains aggregate information about a directory tree:
// total file count, total size in bytes, and directory count.
type FileCountInfo struct {
	FileCount int64
	Size      int64
	DirCount  int64
}
// Meta data type of dir or file
const (
	SeafMetadataTypeInvalid = iota
	SeafMetadataTypeFile
	SeafMetadataTypeLink
	SeafMetadataTypeDir
)

// store is the underlying object store for fs objects, created by Init.
var store *objstore.ObjectStore

// Empty value of sha1
const (
	EmptySha1 = "0000000000000000000000000000000000000000"
)

// Since zlib library allocates a large amount of memory every time a new reader is created, when the number of calls is too large,
// the GC will be executed frequently, resulting in high CPU usage.
// zlibReaders is a pool of reusable readers, guarded by zlibLock
// (see GetOneZlibReader / ReturnOneZlibReader).
var zlibReaders []io.ReadCloser
var zlibLock sync.Mutex

// Add fs cache, on the one hand to avoid repeated creation and destruction of repeatedly accessed objects,
// on the other hand it will also slow down the speed at which objects are released.
var fsCache *ristretto.Cache
// Init initializes fs manager and creates underlying object store.
// fsCacheLimit caps the ristretto cache cost, where an object's cost is
// its approximate in-memory size (see calCost/sizeOf).
func Init(seafileConfPath string, seafileDataDir string, fsCacheLimit int64) {
	store = objstore.New(seafileConfPath, seafileDataDir, "fs")
	fsCache, _ = ristretto.NewCache(&ristretto.Config{
		NumCounters: 1e7,          // number of keys to track frequency of (10M).
		MaxCost:     fsCacheLimit, // maximum cost of cache.
		BufferItems: 64,           // number of keys per Get buffer.
		Cost:        calCost,
	})
}
// calCost is the ristretto cost function: the approximate in-memory size
// of a cached value in bytes, as computed by sizeOf.
func calCost(value interface{}) int64 {
	return sizeOf(value)
}
// Static struct/header sizes used when approximating cache costs.
const (
	sizeOfString     = int64(unsafe.Sizeof(string("")))
	sizeOfPointer    = int64(unsafe.Sizeof(uintptr(0)))
	sizeOfSeafile    = int64(unsafe.Sizeof(Seafile{}))
	sizeOfSeafDir    = int64(unsafe.Sizeof(SeafDir{}))
	sizeOfSeafDirent = int64(unsafe.Sizeof(SeafDirent{}))
)

// sizeOf approximates the in-memory footprint in bytes of the values the
// fs cache stores; unknown types cost 0.
func sizeOf(a interface{}) int64 {
	switch x := a.(type) {
	case string:
		return sizeOfString + int64(len(x))
	case []string:
		total := int64(0)
		for _, s := range x {
			total += sizeOf(s)
		}
		return total
	case *Seafile:
		return sizeOfPointer + sizeOfSeafile + int64(len(x.FileID)) + sizeOf(x.BlkIDs)
	case *SeafDir:
		total := sizeOfPointer + sizeOfSeafDir + int64(len(x.DirID))
		for _, dent := range x.Entries {
			total += sizeOf(dent)
		}
		return total
	case *SeafDirent:
		return sizeOfPointer + sizeOfSeafDirent +
			int64(len(x.ID)) + int64(len(x.Name)) + int64(len(x.Modifier))
	default:
		return 0
	}
}
func initZlibReader() (io.ReadCloser, error) {
var buf bytes.Buffer
// Since the corresponding reader has not been obtained when zlib is initialized,
// an io.Reader needs to be built to initialize zlib.
w := zlib.NewWriter(&buf)
w.Close()
r, err := zlib.NewReader(&buf)
if err != nil {
return nil, err
}
return r, nil
}
// GetOneZlibReader pops a pooled zlib reader, creating a fresh one when the
// pool is empty. Returns nil if a new reader cannot be constructed.
func GetOneZlibReader() io.ReadCloser {
	zlibLock.Lock()
	defer zlibLock.Unlock()
	if len(zlibReaders) > 0 {
		r := zlibReaders[0]
		zlibReaders = zlibReaders[1:]
		return r
	}
	r, err := initZlibReader()
	if err != nil {
		return nil
	}
	return r
}
// ReturnOneZlibReader puts reader back into the pool for reuse; nil readers
// are ignored.
func ReturnOneZlibReader(reader io.ReadCloser) {
	if reader == nil {
		return
	}
	zlibLock.Lock()
	zlibReaders = append(zlibReaders, reader)
	zlibLock.Unlock()
}
// NewDirent initializes a SeafDirent. An empty id is replaced with
// EmptySha1; modifier and size are only recorded for regular files.
func NewDirent(id string, name string, mode uint32, mtime int64, modifier string, size int64) *SeafDirent {
	if id == "" {
		id = EmptySha1
	}
	dent := &SeafDirent{
		ID:    id,
		Name:  name,
		Mode:  mode,
		Mtime: mtime,
	}
	if IsRegular(mode) {
		dent.Modifier = modifier
		dent.Size = size
	}
	return dent
}
// NewSeafdir initializes a SeafDir object and computes its content-addressed
// DirID (SHA-1 of the canonical JSON encoding, cached in dir.data).
// An empty entry list yields the well-known EmptySha1 id.
func NewSeafdir(version int, entries []*SeafDirent) (*SeafDir, error) {
	dir := new(SeafDir)
	dir.Version = version
	dir.Entries = entries
	if len(entries) == 0 {
		dir.DirID = EmptySha1
		return dir, nil
	}
	jsonstr, err := dir.toJSON()
	if err != nil {
		// Preserve the underlying encoding error instead of discarding it
		// (the original error message dropped the cause entirely).
		return nil, fmt.Errorf("failed to convert seafdir to json: %v", err)
	}
	dir.data = jsonstr
	checksum := sha1.Sum(jsonstr)
	dir.DirID = hex.EncodeToString(checksum[:])
	return dir, nil
}
// NewSeafile initializes a Seafile object and computes its content-addressed
// FileID (SHA-1 of the canonical JSON encoding, cached in seafile.data).
// An empty block list yields the well-known EmptySha1 id.
func NewSeafile(version int, fileSize int64, blkIDs []string) (*Seafile, error) {
	seafile := new(Seafile)
	seafile.Version = version
	seafile.FileSize = uint64(fileSize)
	seafile.BlkIDs = blkIDs
	if len(blkIDs) == 0 {
		seafile.FileID = EmptySha1
		return seafile, nil
	}
	jsonstr, err := seafile.toJSON()
	if err != nil {
		// Preserve the underlying encoding error instead of discarding it
		// (the original error message dropped the cause entirely).
		return nil, fmt.Errorf("failed to convert seafile to json: %v", err)
	}
	seafile.data = jsonstr
	checkSum := sha1.Sum(jsonstr)
	seafile.FileID = hex.EncodeToString(checkSum[:])
	return seafile, nil
}
func uncompress(p []byte, reader io.ReadCloser) ([]byte, error) {
b := bytes.NewReader(p)
var out bytes.Buffer
if reader == nil {
r, err := zlib.NewReader(b)
if err != nil {
return nil, err
}
_, err = io.Copy(&out, r)
if err != nil {
r.Close()
return nil, err
}
r.Close()
return out.Bytes(), nil
}
// resue the old zlib reader.
resetter, _ := reader.(zlib.Resetter)
err := resetter.Reset(b, nil)
if err != nil {
return nil, err
}
_, err = io.Copy(&out, reader)
if err != nil {
return nil, err
}
return out.Bytes(), nil
}
func compress(p []byte) ([]byte, error) {
var out bytes.Buffer
w := zlib.NewWriter(&out)
_, err := w.Write(p)
if err != nil {
w.Close()
return nil, err
}
w.Close()
return out.Bytes(), nil
}
// FromData reads from p and converts JSON-encoded data to Seafile.
// p holds the zlib-compressed object; reader, when non-nil, is a pooled
// zlib reader reused for decompression. The decoded object is validated:
// it must be a file-type object with version >= 1, a present block-id
// array, and well-formed block IDs.
func (seafile *Seafile) FromData(p []byte, reader io.ReadCloser) error {
	b, err := uncompress(p, reader)
	if err != nil {
		return err
	}
	err = json.Unmarshal(b, seafile)
	if err != nil {
		return err
	}
	if seafile.FileType != SeafMetadataTypeFile {
		return fmt.Errorf("object %s is not a file", seafile.FileID)
	}
	if seafile.Version < 1 {
		return fmt.Errorf("seafile object %s version should be > 0, version is %d", seafile.FileID, seafile.Version)
	}
	if seafile.BlkIDs == nil {
		return fmt.Errorf("no block id array in seafile object %s", seafile.FileID)
	}
	for _, blkID := range seafile.BlkIDs {
		if !utils.IsObjectIDValid(blkID) {
			return fmt.Errorf("block id %s is invalid", blkID)
		}
	}
	return nil
}
// ToData compresses the cached canonical JSON encoding of the seafile
// object and writes it to w.
func (seafile *Seafile) ToData(w io.Writer) error {
	buf, err := compress(seafile.data)
	if err != nil {
		return err
	}
	_, err = w.Write(buf)
	return err
}
// ToData compresses the cached canonical JSON encoding of the seafdir
// object and writes it to w.
func (seafdir *SeafDir) ToData(w io.Writer) error {
	buf, err := compress(seafdir.data)
	if err != nil {
		return err
	}
	_, err = w.Write(buf)
	return err
}
// FromData reads from p and converts JSON-encoded data to SeafDir.
// p holds the zlib-compressed object; reader, when non-nil, is a pooled
// zlib reader reused for decompression. The decoded object is validated:
// it must be a dir-type object with version >= 1, a present dirent list,
// and well-formed dirent IDs.
func (seafdir *SeafDir) FromData(p []byte, reader io.ReadCloser) error {
	b, err := uncompress(p, reader)
	if err != nil {
		return err
	}
	err = json.Unmarshal(b, seafdir)
	if err != nil {
		return err
	}
	if seafdir.DirType != SeafMetadataTypeDir {
		return fmt.Errorf("object %s is not a dir", seafdir.DirID)
	}
	if seafdir.Version < 1 {
		return fmt.Errorf("dir object %s version should be > 0, version is %d", seafdir.DirID, seafdir.Version)
	}
	if seafdir.Entries == nil {
		return fmt.Errorf("no dirents in dir object %s", seafdir.DirID)
	}
	for _, dent := range seafdir.Entries {
		if !utils.IsObjectIDValid(dent.ID) {
			return fmt.Errorf("dirent id %s is invalid", dent.ID)
		}
	}
	return nil
}
// ReadRaw reads the raw bytes of object objID in repo repoID from the
// storage backend into w. It is a thin delegation to the object store.
func ReadRaw(repoID string, objID string, w io.Writer) error {
	return store.Read(repoID, objID, w)
}
// WriteRaw writes the bytes read from r as object objID of repo repoID to
// the storage backend. The trailing false flag is passed through unchanged
// (presumably "no fsync" — verify against the objstore implementation).
func WriteRaw(repoID string, objID string, r io.Reader) error {
	return store.Write(repoID, objID, r, false)
}
// GetSeafile gets seafile from storage backend.
// It reads without a caller-supplied zlib reader (nil is forwarded to
// getSeafile, which then lets uncompress create its own).
func GetSeafile(repoID string, fileID string) (*Seafile, error) {
	return getSeafile(repoID, fileID, nil)
}
// GetSeafileWithZlibReader gets seafile from storage backend with a zlib reader.
// The reader is reused for decompression instead of allocating a new one.
func GetSeafileWithZlibReader(repoID string, fileID string, reader io.ReadCloser) (*Seafile, error) {
	return getSeafile(repoID, fileID, reader)
}
// getSeafile loads and parses the seafile object fileID from repo repoID.
// The empty sha1 is special-cased as an empty file without touching the
// storage backend.
func getSeafile(repoID string, fileID string, reader io.ReadCloser) (*Seafile, error) {
	seafile := new(Seafile)
	if fileID == EmptySha1 {
		seafile.FileID = EmptySha1
		return seafile, nil
	}
	seafile.FileID = fileID

	var buf bytes.Buffer
	if err := ReadRaw(repoID, fileID, &buf); err != nil {
		return nil, fmt.Errorf("failed to read seafile object from storage : %v", err)
	}
	if err := seafile.FromData(buf.Bytes(), reader); err != nil {
		return nil, fmt.Errorf("failed to parse seafile object %s/%s : %v", repoID, fileID, err)
	}
	if seafile.Version < 1 {
		return nil, fmt.Errorf("seafile object %s/%s version should be > 0", repoID, fileID)
	}
	return seafile, nil
}
// SaveSeafile saves seafile to storage backend.
// Empty files (EmptySha1) are never materialized, and since objects are
// content-addressed an already-existing object is not rewritten.
func SaveSeafile(repoID string, seafile *Seafile) error {
	fileID := seafile.FileID
	if fileID == EmptySha1 {
		return nil
	}
	if exist, _ := store.Exists(repoID, fileID); exist {
		return nil
	}

	seafile.FileType = SeafMetadataTypeFile
	var buf bytes.Buffer
	if err := seafile.ToData(&buf); err != nil {
		return fmt.Errorf("failed to convert seafile object %s/%s to json", repoID, fileID)
	}
	if err := WriteRaw(repoID, fileID, &buf); err != nil {
		return fmt.Errorf("failed to write seafile object to storage : %v", err)
	}
	return nil
}
// GetSeafdir gets seafdir from storage backend.
// No zlib reader is supplied and the seafdir cache is bypassed.
func GetSeafdir(repoID string, dirID string) (*SeafDir, error) {
	return getSeafdir(repoID, dirID, nil, false)
}
// GetSeafdirWithZlibReader gets seafdir from storage backend with a zlib
// reader, consulting and populating the seafdir cache.
func GetSeafdirWithZlibReader(repoID string, dirID string, reader io.ReadCloser) (*SeafDir, error) {
	return getSeafdir(repoID, dirID, reader, true)
}
// getSeafdir loads and parses the seafdir object dirID from repo repoID.
// When useCache is set, the in-memory cache is checked first and the
// freshly loaded dir is stored back on success. The empty sha1 is
// special-cased as an empty directory without touching the backend.
func getSeafdir(repoID string, dirID string, reader io.ReadCloser, useCache bool) (*SeafDir, error) {
	if useCache {
		if cached := getSeafdirFromCache(repoID, dirID); cached != nil {
			return cached, nil
		}
	}

	seafdir := new(SeafDir)
	if dirID == EmptySha1 {
		seafdir.DirID = EmptySha1
		return seafdir, nil
	}
	seafdir.DirID = dirID

	var buf bytes.Buffer
	if err := ReadRaw(repoID, dirID, &buf); err != nil {
		return nil, fmt.Errorf("failed to read seafdir object from storage : %v", err)
	}
	if err := seafdir.FromData(buf.Bytes(), reader); err != nil {
		return nil, fmt.Errorf("failed to parse seafdir object %s/%s : %v", repoID, dirID, err)
	}
	if seafdir.Version < 1 {
		return nil, fmt.Errorf("seadir object %s/%s version should be > 0", repoID, dirID)
	}

	if useCache {
		setSeafdirToCache(repoID, seafdir)
	}
	return seafdir, nil
}
// getSeafdirFromCache returns the cached SeafDir for repoID+dirID, or nil
// on a miss (or if the cached value has an unexpected type).
func getSeafdirFromCache(repoID string, dirID string) *SeafDir {
	if v, ok := fsCache.Get(repoID + dirID); ok {
		if dir, ok := v.(*SeafDir); ok {
			return dir
		}
	}
	return nil
}
// setSeafdirToCache caches seafdir under repoID+DirID for one hour.
// It always returns nil; the error return exists for interface symmetry.
func setSeafdirToCache(repoID string, seafdir *SeafDir) error {
	fsCache.SetWithTTL(repoID+seafdir.DirID, seafdir, 0, time.Hour)
	return nil
}
// SaveSeafdir saves seafdir to storage backend.
// Empty dirs (EmptySha1) are never materialized, and since objects are
// content-addressed an already-existing object is not rewritten.
func SaveSeafdir(repoID string, seafdir *SeafDir) error {
	dirID := seafdir.DirID
	if dirID == EmptySha1 {
		return nil
	}
	if exist, _ := store.Exists(repoID, dirID); exist {
		return nil
	}

	seafdir.DirType = SeafMetadataTypeDir
	var buf bytes.Buffer
	if err := seafdir.ToData(&buf); err != nil {
		return fmt.Errorf("failed to convert seafdir object %s/%s to json", repoID, dirID)
	}
	if err := WriteRaw(repoID, dirID, &buf); err != nil {
		return fmt.Errorf("failed to write seafdir object to storage : %v", err)
	}
	return nil
}
// Exists check if fs object is exists.
// The empty sha1 always "exists" (it denotes an empty file/dir that is
// never written to the backend); anything else is checked in the store.
func Exists(repoID string, objID string) (bool, error) {
	if objID == EmptySha1 {
		return true, nil
	}
	return store.Exists(repoID, objID)
}
// comp reports whether c is the path separator '/'.
// It is used with strings.FieldsFunc to split slash-separated paths.
func comp(c rune) bool {
	return c == '/'
}
// IsDir check if the mode is dir.
func IsDir(m uint32) bool {
return (m & syscall.S_IFMT) == syscall.S_IFDIR
}
// IsRegular Check if the mode is regular.
func IsRegular(m uint32) bool {
return (m & syscall.S_IFMT) == syscall.S_IFREG
}
// ErrPathNoExist is an error indicating that the file does not exist.
// It is a fixed sentinel value; callers compare against it with ==
// (see GetObjIDByPath).
var ErrPathNoExist = fmt.Errorf("path does not exist")
// GetSeafdirByPath gets the object of seafdir by path.
// The path is resolved component by component starting from rootID.
// It returns ErrPathNoExist when some component is not a directory entry
// of its parent.
func GetSeafdirByPath(repoID string, rootID string, path string) (*SeafDir, error) {
	dir, err := GetSeafdir(repoID, rootID)
	if err != nil {
		errors := fmt.Errorf("directory is missing")
		return nil, errors
	}

	path = filepath.Join("/", path)
	parts := strings.FieldsFunc(path, comp)
	for _, name := range parts {
		// Fix: dirID must be reset for every component. Previously it was
		// declared outside the loop, so when a later component was missing
		// the stale id from the previous iteration kept the `dirID == ""`
		// check from firing and the wrong directory was returned instead
		// of ErrPathNoExist.
		var dirID string
		for _, v := range dir.Entries {
			if v.Name == name && IsDir(v.Mode) {
				dirID = v.ID
				break
			}
		}
		if dirID == "" {
			return nil, ErrPathNoExist
		}
		dir, err = GetSeafdir(repoID, dirID)
		if err != nil {
			errors := fmt.Errorf("directory is missing")
			return nil, errors
		}
	}
	return dir, nil
}
// GetSeafdirIDByPath gets the dirID of SeafDir by path.
// It returns "" with a nil error when the path resolves to a
// non-directory object or to nothing at all.
func GetSeafdirIDByPath(repoID, rootID, path string) (string, error) {
	id, mode, err := GetObjIDByPath(repoID, rootID, path)
	if err != nil {
		return "", fmt.Errorf("failed to get dir id by path: %s: %w", path, err)
	}
	if id == "" || !IsDir(mode) {
		return "", nil
	}
	return id, nil
}
// GetObjIDByPath gets the obj id by path
// Returns (id, mode, nil) for the entry at path under rootID;
// ("", 0, nil) when the final component is missing from its parent dir;
// and ErrPathNoExist when an intermediate directory does not exist.
func GetObjIDByPath(repoID, rootID, path string) (string, uint32, error) {
	var name string
	var baseDir *SeafDir
	// filepath.Join with a single argument cleans the path
	// (collapses duplicate slashes, removes trailing ones).
	formatPath := filepath.Join(path)
	if len(formatPath) == 0 || formatPath == "/" {
		// Empty or root path: the root itself is the answer.
		return rootID, syscall.S_IFDIR, nil
	}
	index := strings.Index(formatPath, "/")
	if index < 0 {
		// Single component: look it up directly in the root dir.
		dir, err := GetSeafdir(repoID, rootID)
		if err != nil {
			err := fmt.Errorf("failed to find root dir %s: %v", rootID, err)
			return "", 0, err
		}
		name = formatPath
		baseDir = dir
	} else {
		// Multi-component path: resolve the parent dir first, then search
		// for the last component inside it.
		name = filepath.Base(formatPath)
		dirName := filepath.Dir(formatPath)
		dir, err := GetSeafdirByPath(repoID, rootID, dirName)
		if err != nil {
			if err == ErrPathNoExist {
				return "", syscall.S_IFDIR, ErrPathNoExist
			}
			err := fmt.Errorf("failed to find dir %s in repo %s: %v", dirName, repoID, err)
			return "", syscall.S_IFDIR, err
		}
		baseDir = dir
	}

	entries := baseDir.Entries
	for _, de := range entries {
		if de.Name == name {
			return de.ID, de.Mode, nil
		}
	}
	// Parent exists but the entry does not: not treated as an error.
	return "", 0, nil
}
// GetFileCountInfoByPath gets the count info of file by path.
// The path is first resolved to a directory id, then counted recursively.
func GetFileCountInfoByPath(repoID, rootID, path string) (*FileCountInfo, error) {
	dirID, err := GetSeafdirIDByPath(repoID, rootID, path)
	if err != nil {
		return nil, fmt.Errorf("failed to get file count info for repo %s path %s: %v", repoID, path, err)
	}
	info, err := getFileCountInfo(repoID, dirID)
	if err != nil {
		return nil, fmt.Errorf("failed to get file count in repo %s: %v", repoID, err)
	}
	return info, nil
}
// getFileCountInfo recursively tallies the number of files, directories
// and total byte size under dirID of repo repoID.
func getFileCountInfo(repoID, dirID string) (*FileCountInfo, error) {
	dir, err := GetSeafdir(repoID, dirID)
	if err != nil {
		err := fmt.Errorf("failed to get dir: %v", err)
		return nil, err
	}
	info := new(FileCountInfo)
	entries := dir.Entries
	for _, de := range entries {
		if IsDir(de.Mode) {
			tmpInfo, err := getFileCountInfo(repoID, de.ID)
			if err != nil {
				err := fmt.Errorf("failed to get file count: %v", err)
				return nil, err
			}
			// Fix: accumulate with += — the previous `=` overwrote the
			// running directory count whenever a dir contained more than
			// one subdirectory (FileCount and Size already used +=).
			info.DirCount += tmpInfo.DirCount + 1
			info.FileCount += tmpInfo.FileCount
			info.Size += tmpInfo.Size
		} else {
			info.FileCount++
			info.Size += de.Size
		}
	}
	return info, nil
}
// GetDirentByPath returns the dirent named by rpath, resolved from rootID.
// It returns ErrPathNoExist when the final component is not found in its
// parent directory.
func GetDirentByPath(repoID, rootID, rpath string) (*SeafDirent, error) {
	parent := filepath.Dir(rpath)
	target := filepath.Base(rpath)

	var dir *SeafDir
	var err error
	if parent == "." {
		dir, err = GetSeafdir(repoID, rootID)
	} else {
		dir, err = GetSeafdirByPath(repoID, rootID, parent)
	}
	if err != nil {
		return nil, err
	}

	for _, de := range dir.Entries {
		if de.Name == target {
			return de, nil
		}
	}
	return nil, ErrPathNoExist
}
================================================
FILE: fileserver/fsmgr/fsmgr_test.go
================================================
package fsmgr
import (
"fmt"
"os"
"testing"
)
const (
seafileConfPath = "/tmp/conf"
seafileDataDir = "/tmp/conf/seafile-data"
repoID = "b1f2ad61-9164-418a-a47f-ab805dbd5694"
blkID = "0401fc662e3bc87a41f299a907c056aaf8322a26"
subDirID = "0401fc662e3bc87a41f299a907c056aaf8322a27"
)
var dirID string
var fileID string
// createFile seeds the test store with one seafile (two identical blocks)
// and one seafdir (two identical entries), recording their object ids in
// the package-level fileID and dirID variables.
func createFile() error {
	blkIDs := []string{blkID, blkID}
	seafile, err := NewSeafile(1, 100, blkIDs)
	if err != nil {
		return err
	}
	if err := SaveSeafile(repoID, seafile); err != nil {
		return err
	}
	fileID = seafile.FileID

	var entries []*SeafDirent
	for i := 0; i < 2; i++ {
		entries = append(entries, &SeafDirent{ID: subDirID, Name: "/", Mode: 0x4000})
	}
	seafdir, err := NewSeafdir(1, entries)
	if err != nil {
		return fmt.Errorf("failed to new seafdir: %v", err)
	}
	if err := SaveSeafdir(repoID, seafdir); err != nil {
		return err
	}
	dirID = seafdir.DirID
	return nil
}
// delFile removes the on-disk test configuration tree created by the tests.
func delFile() error {
	return os.RemoveAll(seafileConfPath)
}
// TestMain initializes the fs manager, seeds the test fixtures, runs the
// tests and cleans up afterwards.
func TestMain(m *testing.M) {
	Init(seafileConfPath, seafileDataDir, 2<<30)
	if err := createFile(); err != nil {
		fmt.Printf("Failed to create test file : %v.\n", err)
		os.Exit(1)
	}
	code := m.Run()
	if err := delFile(); err != nil {
		fmt.Printf("Failed to remove test file : %v\n", err)
	}
	os.Exit(code)
}
// TestGetSeafile loads back the seafile created in createFile and checks
// that every block id matches the fixture.
func TestGetSeafile(t *testing.T) {
	if exists, err := Exists(repoID, fileID); !exists {
		t.Errorf("seafile is not exists : %v.\n", err)
	}
	file, err := GetSeafile(repoID, fileID)
	if err != nil || file == nil {
		t.Errorf("Failed to get seafile : %v.\n", err)
		t.FailNow()
	}
	for _, id := range file.BlkIDs {
		if id != blkID {
			t.Errorf("Wrong file content.\n")
		}
	}
}
// TestGetSeafdir loads back the seafdir created in createFile and checks
// that every entry points at the fixture sub-directory id.
func TestGetSeafdir(t *testing.T) {
	exists, err := Exists(repoID, dirID)
	if !exists {
		// Fixed diagnostic: this test checks the seafdir object, not a
		// seafile (the old message said "seafile is not exists").
		t.Errorf("seafdir does not exist : %v.\n", err)
	}
	seafdir, err := GetSeafdir(repoID, dirID)
	if err != nil || seafdir == nil {
		t.Errorf("Failed to get seafdir : %v.\n", err)
		t.FailNow()
	}
	for _, v := range seafdir.Entries {
		if v.ID != subDirID {
			t.Errorf("Wrong file content.\n")
		}
	}
}
// TestGetSeafdirByPath resolves the fixture dir via the root path "/" and
// checks that every entry points at the fixture sub-directory id.
func TestGetSeafdirByPath(t *testing.T) {
	seafdir, err := GetSeafdirByPath(repoID, dirID, "/")
	if err != nil || seafdir == nil {
		t.Errorf("Failed to get seafdir : %v.\n", err)
		t.FailNow()
	}
	for _, entry := range seafdir.Entries {
		if entry.ID != subDirID {
			t.Errorf("Wrong file content.\n")
		}
	}
}
================================================
FILE: fileserver/go.mod
================================================
module github.com/haiwen/seafile-server/fileserver
go 1.22
require (
github.com/dgraph-io/ristretto v0.2.0
github.com/go-redis/redis/v8 v8.11.5
github.com/go-sql-driver/mysql v1.5.0
github.com/golang-jwt/jwt/v5 v5.2.2
github.com/google/uuid v1.1.1
github.com/gorilla/mux v1.7.4
github.com/json-iterator/go v1.1.12
github.com/sirupsen/logrus v1.9.3
golang.org/x/text v0.3.8
gopkg.in/ini.v1 v1.55.0
)
require (
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/smartystreets/goconvey v1.6.4 // indirect
golang.org/x/sys v0.11.0 // indirect
)
================================================
FILE: fileserver/go.sum
================================================
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgraph-io/ristretto v0.2.0 h1:XAfl+7cmoUDWW/2Lx8TGZQjjxIQ2Ley9DSf52dru4WE=
github.com/dgraph-io/ristretto v0.2.0/go.mod h1:8uBHCU/PBV4Ag0CJrP47b9Ofby5dqWNh4FicAdoqFNU=
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/ini.v1 v1.55.0 h1:E8yzL5unfpW3M6fz/eB7Cb5MQAYSZ7GKo4Qth+N2sgQ=
gopkg.in/ini.v1 v1.55.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
================================================
FILE: fileserver/http_code.go
================================================
package main
// Seafile-specific HTTP status codes returned by the sync protocol,
// mirroring the SEAF_HTTP_RES_* constants of the C server.
const (
	seafHTTPResBadFileName = 440
	seafHTTPResExists      = 441
	// NOTE(review): seafHTTPResNotExists shares code 441 with
	// seafHTTPResExists — presumably intentional (matching the C side),
	// but clients cannot distinguish the two by status code alone; confirm
	// before relying on the distinction.
	seafHTTPResNotExists     = 441
	seafHTTPResTooLarge      = 442
	seafHTTPResNoQuota       = 443
	seafHTTPResRepoDeleted   = 444
	seafHTTPResRepoCorrupted = 445
	seafHTTPResBlockMissing  = 446
)
================================================
FILE: fileserver/merge.go
================================================
package main
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"path/filepath"
"sort"
"strings"
"time"
"github.com/haiwen/seafile-server/fileserver/commitmgr"
"github.com/haiwen/seafile-server/fileserver/fsmgr"
"github.com/haiwen/seafile-server/fileserver/option"
"github.com/haiwen/seafile-server/fileserver/utils"
)
// mergeOptions carries the parameters and cross-call state of one
// three-way merge.
type mergeOptions struct {
	remoteRepoID    string            // repo the remote head commit belongs to
	remoteHead      string            // commit id of the remote branch head
	mergedRoot      string            // out: dir id of the merged tree (set by mergeTreesRecursive)
	conflict        bool              // out: true when any conflict file name was generated
	emailToNickname map[string]string // cache: modifier email -> display nickname
}
// mergeTrees three-way merges the trees named by roots (base, head,
// remote, in that order) within store storeID; the merged root dir id is
// reported through opt.mergedRoot.
func mergeTrees(storeID string, roots []string, opt *mergeOptions) error {
	if len(roots) != 3 {
		return fmt.Errorf("invalid argument")
	}
	opt.emailToNickname = make(map[string]string)

	trees := make([]*fsmgr.SeafDir, 0, 3)
	for _, root := range roots {
		dir, err := fsmgr.GetSeafdir(storeID, root)
		if err != nil {
			return fmt.Errorf("failed to get dir: %v", err)
		}
		trees = append(trees, dir)
	}

	if err := mergeTreesRecursive(storeID, trees, "", opt); err != nil {
		return fmt.Errorf("failed to merge trees: %v", err)
	}
	return nil
}
// mergeTreesRecursive merges one directory level three ways:
// trees[0] is the merge base, trees[1] the local head and trees[2] the
// remote version (matching base/head/remote in mergeEntries). Merged
// entries are collected, sorted, saved as a new seafdir, and its id is
// reported through opt.mergedRoot.
func mergeTreesRecursive(storeID string, trees []*fsmgr.SeafDir, baseDir string, opt *mergeOptions) error {
	var ptrs [3][]*fsmgr.SeafDirent
	var mergedDents []*fsmgr.SeafDirent
	n := 3
	for i := 0; i < n; i++ {
		if trees[i] != nil {
			ptrs[i] = trees[i].Entries
		}
	}
	var done bool
	// offset[i] is the cursor into tree i's entry list.
	var offset = make([]int, n)
	for {
		dents := make([]*fsmgr.SeafDirent, n)
		var firstName string
		done = true
		// Pick the next name to merge: the lexicographically largest name
		// among the three cursors. NOTE(review): this assumes entry lists
		// are kept in descending name order (see sort.Sort(Dirents(...))
		// below) — confirm against Dirents.Less.
		for i := 0; i < n; i++ {
			if len(ptrs[i]) > offset[i] {
				done = false
				dent := ptrs[i][offset[i]]
				if firstName == "" {
					firstName = dent.Name
				} else if dent.Name > firstName {
					firstName = dent.Name
				}
			}
		}
		if done {
			break
		}
		// Gather the dirents carrying that name from each tree, advance
		// those cursors, and count files vs directories.
		var nFiles, nDirs int
		for i := 0; i < n; i++ {
			if len(ptrs[i]) > offset[i] {
				dent := ptrs[i][offset[i]]
				if firstName == dent.Name {
					if fsmgr.IsDir(dent.Mode) {
						nDirs++
					} else {
						nFiles++
					}
					dents[i] = dent
					offset[i]++
				}
			}
		}
		// A name can be a file in one tree and a dir in another, so both
		// mergers may run; each ignores entries of the other kind.
		if nFiles > 0 {
			retDents, err := mergeEntries(storeID, dents, baseDir, opt)
			if err != nil {
				return err
			}
			mergedDents = append(mergedDents, retDents...)
		}
		if nDirs > 0 {
			retDents, err := mergeDirectories(storeID, dents, baseDir, opt)
			if err != nil {
				return err
			}
			mergedDents = append(mergedDents, retDents...)
		}
	}
	sort.Sort(Dirents(mergedDents))
	mergedTree, err := fsmgr.NewSeafdir(1, mergedDents)
	if err != nil {
		err := fmt.Errorf("failed to new seafdir: %v", err)
		return err
	}
	opt.mergedRoot = mergedTree.DirID
	// If the merge result equals head or remote, that exact tree already
	// exists in the store — skip the write.
	if trees[1] != nil && trees[1].DirID == mergedTree.DirID ||
		trees[2] != nil && trees[2].DirID == mergedTree.DirID {
		return nil
	}
	err = fsmgr.SaveSeafdir(storeID, mergedTree)
	if err != nil {
		err := fmt.Errorf("failed to save merged tree %s/%s", storeID, baseDir)
		return err
	}
	return nil
}
// mergeEntries merges the file variants of one name three ways.
// dents[0]/dents[1]/dents[2] are the base/head/remote dirents for that
// name (nil where absent); directory entries are filtered out here —
// mergeDirectories handles them. On an unresolvable conflict the remote
// entry is renamed via mergeConflictFileName and opt.conflict is set.
func mergeEntries(storeID string, dents []*fsmgr.SeafDirent, baseDir string, opt *mergeOptions) ([]*fsmgr.SeafDirent, error) {
	var mergedDents []*fsmgr.SeafDirent
	n := 3
	files := make([]*fsmgr.SeafDirent, n)
	for i := 0; i < n; i++ {
		if dents[i] != nil && !fsmgr.IsDir(dents[i].Mode) {
			files[i] = dents[i]
		}
	}
	// files[i] is nil when tree i has no entry of this name OR has a
	// directory here.
	base := files[0]
	head := files[1]
	remote := files[2]
	if head != nil && remote != nil {
		// File present on both sides.
		if head.ID == remote.ID {
			// Same content: keep either side.
			mergedDents = append(mergedDents, head)
		} else if base != nil && base.ID == head.ID {
			// Only remote changed it: take remote.
			mergedDents = append(mergedDents, remote)
		} else if base != nil && base.ID == remote.ID {
			// Only head changed it: take head.
			mergedDents = append(mergedDents, head)
		} else {
			// Both sides changed it: keep head, keep remote under a
			// conflict name.
			conflictName, _ := mergeConflictFileName(storeID, opt, baseDir, head.Name)
			if conflictName == "" {
				err := fmt.Errorf("failed to generate conflict file name")
				return nil, err
			}
			dents[2].Name = conflictName
			mergedDents = append(mergedDents, head)
			mergedDents = append(mergedDents, remote)
			opt.conflict = true
		}
	} else if base != nil && head == nil && remote != nil {
		// File gone from head (deleted or replaced by a dir), still a
		// file in remote.
		if base.ID != remote.ID {
			if dents[1] != nil {
				// Head has a directory of this name: keep remote's file
				// under a conflict name.
				conflictName, _ := mergeConflictFileName(storeID, opt, baseDir, remote.Name)
				if conflictName == "" {
					err := fmt.Errorf("failed to generate conflict file name")
					return nil, err
				}
				dents[2].Name = conflictName
				mergedDents = append(mergedDents, remote)
				opt.conflict = true
			} else {
				// Remote modified what head deleted: keep remote's version.
				mergedDents = append(mergedDents, remote)
			}
		}
	} else if base != nil && head != nil && remote == nil {
		// File gone from remote (deleted or replaced by a dir), still a
		// file in head — mirror image of the branch above.
		if base.ID != head.ID {
			if dents[2] != nil {
				// Remote has a directory of this name: rename it (the dir
				// dirent) and keep head's file.
				conflictName, _ := mergeConflictFileName(storeID, opt, baseDir, dents[2].Name)
				if conflictName == "" {
					err := fmt.Errorf("failed to generate conflict file name")
					return nil, err
				}
				dents[2].Name = conflictName
				mergedDents = append(mergedDents, head)
				opt.conflict = true
			} else {
				mergedDents = append(mergedDents, head)
			}
		}
	} else if base == nil && head == nil && remote != nil {
		// File added only in remote.
		if dents[1] == nil {
			mergedDents = append(mergedDents, remote)
		} else if dents[0] != nil && dents[0].ID == dents[1].ID {
			mergedDents = append(mergedDents, remote)
		} else {
			// Head holds a conflicting directory: keep remote's file under
			// a conflict name.
			conflictName, _ := mergeConflictFileName(storeID, opt, baseDir, remote.Name)
			if conflictName == "" {
				err := fmt.Errorf("failed to generate conflict file name")
				return nil, err
			}
			dents[2].Name = conflictName
			mergedDents = append(mergedDents, remote)
			opt.conflict = true
		}
	} else if base == nil && head != nil && remote == nil {
		// File added only in head — mirror image of the branch above.
		if dents[2] == nil {
			mergedDents = append(mergedDents, head)
		} else if dents[0] != nil && dents[0].ID == dents[2].ID {
			mergedDents = append(mergedDents, head)
		} else {
			// Remote holds a conflicting directory: rename it and keep head.
			conflictName, _ := mergeConflictFileName(storeID, opt, baseDir, dents[2].Name)
			if conflictName == "" {
				err := fmt.Errorf("failed to generate conflict file name")
				return nil, err
			}
			dents[2].Name = conflictName
			mergedDents = append(mergedDents, head)
			opt.conflict = true
		}
	} /* else if base != nil && head == nil && remote == nil {
	Deleted on both sides: don't need to add anything to mergedDents.
	}*/
	return mergedDents, nil
}
// mergeDirectories merges the directory variants of one name three ways.
// dirMask is a bitmap of which trees have a directory of this name:
// bit 0 = base, bit 1 = head, bit 2 = remote. Trivial masks are resolved
// directly; the remaining cases recurse via mergeTreesRecursive and pick
// up the merged subtree id from opt.mergedRoot.
func mergeDirectories(storeID string, dents []*fsmgr.SeafDirent, baseDir string, opt *mergeOptions) ([]*fsmgr.SeafDirent, error) {
	var dirMask int
	var mergedDents []*fsmgr.SeafDirent
	var dirName string
	n := 3
	subDirs := make([]*fsmgr.SeafDir, n)
	for i := 0; i < n; i++ {
		if dents[i] != nil && fsmgr.IsDir(dents[i].Mode) {
			dirMask |= 1 << i
		}
	}
	switch dirMask {
	case 0:
		// Caller guarantees at least one directory; mask 0 is a logic error.
		err := fmt.Errorf("no dirent for merge")
		return nil, err
	case 1:
		// Dir only in base: deleted by both sides — drop it.
		return mergedDents, nil
	case 2:
		// Dir only in head: keep it.
		mergedDents = append(mergedDents, dents[1])
		return mergedDents, nil
	case 3:
		// In base and head, gone from remote: drop it if head left it
		// unchanged; otherwise continue below with a recursive merge.
		if dents[0].ID == dents[1].ID {
			return mergedDents, nil
		}
	case 4:
		// Dir only in remote: keep it.
		mergedDents = append(mergedDents, dents[2])
		return mergedDents, nil
	case 5:
		// In base and remote, gone from head: symmetric to case 3.
		if dents[0].ID == dents[2].ID {
			return mergedDents, nil
		}
	case 6, 7:
		// Dir in head and remote (and, for 7, also base): resolve
		// trivially when the two sides agree or one side is unchanged.
		if dents[1].ID == dents[2].ID {
			mergedDents = append(mergedDents, dents[1])
			return mergedDents, nil
		} else if dents[0] != nil && dents[0].ID == dents[1].ID {
			mergedDents = append(mergedDents, dents[2])
			return mergedDents, nil
		} else if dents[0] != nil && dents[0].ID == dents[2].ID {
			mergedDents = append(mergedDents, dents[1])
			return mergedDents, nil
		}
	default:
		err := fmt.Errorf("wrong dir mask for merge")
		return nil, err
	}
	// Non-trivial case: recursively merge whichever subdirectories exist.
	for i := 0; i < n; i++ {
		subDirs[i] = nil
	}
	for i := 0; i < n; i++ {
		if dents[i] != nil && fsmgr.IsDir(dents[i].Mode) {
			dir, err := fsmgr.GetSeafdir(storeID, dents[i].ID)
			if err != nil {
				err := fmt.Errorf("failed to get seafdir %s/%s", storeID, dents[i].ID)
				return nil, err
			}
			subDirs[i] = dir
			dirName = dents[i].Name
		}
	}
	newBaseDir := filepath.Join(baseDir, dirName)
	newBaseDir = newBaseDir + "/"
	err := mergeTreesRecursive(storeID, subDirs, newBaseDir, opt)
	if err != nil {
		err := fmt.Errorf("failed to merge trees: %v", err)
		return nil, err
	}
	// Reuse an existing dirent, repointing it at the merged subtree root.
	if dirMask == 3 || dirMask == 6 || dirMask == 7 {
		dent := dents[1]
		dent.ID = opt.mergedRoot
		mergedDents = append(mergedDents, dent)
	} else if dirMask == 5 {
		dent := dents[2]
		dent.ID = opt.mergedRoot
		mergedDents = append(mergedDents, dent)
	}
	return mergedDents, nil
}
// mergeConflictFileName builds the conflict name for fileName under
// baseDir: modifier and mtime come from the file's dirent at the remote
// head; if that lookup fails, the head commit's creator and the current
// time are used instead.
func mergeConflictFileName(storeID string, opt *mergeOptions, baseDir, fileName string) (string, error) {
	filePath := filepath.Join(baseDir, fileName)
	modifier, mtime, err := getFileModifierMtime(opt.remoteRepoID, storeID, opt.remoteHead, filePath)
	if err != nil {
		commit, err := commitmgr.Load(opt.remoteRepoID, opt.remoteHead)
		if err != nil {
			return "", fmt.Errorf("failed to get head commit")
		}
		modifier = commit.CreatorName
		mtime = time.Now().Unix()
	}

	nickname := getNickNameByModifier(opt.emailToNickname, modifier)
	return genConflictPath(fileName, nickname, mtime), nil
}
func genConflictPath(originPath, modifier string, mtime int64) string {
var conflictPath string
now := time.Now()
timeBuf := now.Format("2006-Jan-2-15-04-05")
dot := strings.Index(originPath, ".")
if dot < 0 {
if modifier != "" {
conflictPath = fmt.Sprintf("%s (SFConflict %s %s)",
originPath, modifier, timeBuf)
} else {
conflictPath = fmt.Sprintf("%s (SFConflict %s)",
originPath, timeBuf)
}
} else {
if modifier != "" {
conflictPath = fmt.Sprintf("%s (SFConflict %s %s).%s",
originPath, modifier, timeBuf, originPath[dot+1:])
} else {
conflictPath = fmt.Sprintf("%s (SFConflict %s).%s",
originPath, timeBuf, originPath[dot+1:])
}
}
return conflictPath
}
// getNickNameByModifier resolves modifier (an email) to a display
// nickname, memoizing results in emailToNickname. Seahub is only queried
// when a JWT private key is configured; on any failure the email itself
// is used as the nickname.
func getNickNameByModifier(emailToNickname map[string]string, modifier string) string {
	if modifier == "" {
		return ""
	}
	if cached, ok := emailToNickname[modifier]; ok {
		return cached
	}

	var nickname string
	if option.JWTPrivateKey != "" {
		nickname = postGetNickName(modifier)
	}
	if nickname == "" {
		nickname = modifier
	}
	emailToNickname[modifier] = nickname
	return nickname
}
// postGetNickName asks Seahub's /user-list/ endpoint for the display name
// of modifier. Every failure path returns "" so the caller can fall back
// to the raw email address.
func postGetNickName(modifier string) string {
	token, err := utils.GenSeahubJWTToken()
	if err != nil {
		return ""
	}
	payload, err := json.Marshal(map[string]interface{}{
		"user_id_list": []string{modifier},
	})
	if err != nil {
		return ""
	}

	header := map[string][]string{
		"Authorization": {"Token " + token},
	}
	status, body, err := utils.HttpCommon("POST", option.SeahubURL+"/user-list/", header, bytes.NewReader(payload))
	if err != nil || status != http.StatusOK {
		return ""
	}

	results := make(map[string]interface{})
	if err := json.Unmarshal(body, &results); err != nil {
		return ""
	}
	userList, ok := results["user_list"].([]interface{})
	if !ok {
		return ""
	}
	// Return the first non-empty "name" field in the response list.
	for _, element := range userList {
		entry, ok := element.(map[string]interface{})
		if !ok {
			continue
		}
		if name, _ := entry["name"].(string); name != "" {
			return name
		}
	}
	return ""
}
// getFileModifierMtime returns the modifier and mtime recorded on the
// dirent of filePath as of commit head. It returns ("", -1, err) when the
// commit, the parent directory or the file itself cannot be found.
func getFileModifierMtime(repoID, storeID, head, filePath string) (string, int64, error) {
	commit, err := commitmgr.Load(repoID, head)
	if err != nil {
		return "", -1, fmt.Errorf("failed to get head commit")
	}

	parent := filepath.Dir(filePath)
	if parent == "." {
		parent = ""
	}
	fileName := filepath.Base(filePath)

	dir, err := fsmgr.GetSeafdirByPath(storeID, commit.RootID, parent)
	if err != nil {
		return "", -1, fmt.Errorf("dir %s doesn't exist in repo %s", parent, repoID)
	}
	for _, d := range dir.Entries {
		if d.Name == fileName {
			return d.Modifier, d.Mtime, nil
		}
	}
	return "", -1, fmt.Errorf("file %s doesn't exist in repo %s", fileName, repoID)
}
================================================
FILE: fileserver/merge_test.go
================================================
package main
import (
"fmt"
"os"
"syscall"
"testing"
"github.com/haiwen/seafile-server/fileserver/commitmgr"
"github.com/haiwen/seafile-server/fileserver/fsmgr"
"github.com/haiwen/seafile-server/fileserver/option"
)
const (
mergeTestCommitID = "0401fc662e3bc87a41f299a907c056aaf8322a27"
mergeTestRepoID = "b1f2ad61-9164-418a-a47f-ab805dbd5694"
mergeTestSeafileConfPath = "/tmp/conf"
mergeTestSeafileDataDir = "/tmp/conf/seafile-data"
)
var mergeTestTree1 string
var mergeTestTree2 string
var mergeTestTree3 string
var mergeTestTree4 string
var mergeTestTree5 string
var mergeTestTree1CommitID string
var mergeTestTree2CommitID string
var mergeTestTree3CommitID string
var mergeTestTree4CommitID string
/*
test directory structure:
tree1
|--bbb
|-- testfile(size:1)
tree2
|--bbb
|-- testfile(size:10)
tree3
|--bbb
tree4
|--bbb
|-- testfile(size:100)
tree5
|--
*/
// mergeTestCreateTestDir builds the five trees and four commits described
// in the layout comment above and stores their IDs in the mergeTestTree*
// globals. Trees 1, 2 and 4 share directory structure and differ only in
// the testfile dirent.
func mergeTestCreateTestDir() error {
	// NOTE(review): 0644 is an unusual mode for a directory (no execute
	// bit); kept as-is since these tests only compare entry metadata.
	modeDir := uint32(syscall.S_IFDIR | 0644)
	modeFile := uint32(syscall.S_IFREG | 0644)
	// tree5: empty root
	emptyDir, err := mergeTestCreateSeafdir(nil)
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	mergeTestTree5 = emptyDir
	// tree1: bbb/testfile (size 1)
	file1, err := fsmgr.NewSeafile(1, 1, []string{"4f616f98d6a264f75abffe1bc150019c880be239"})
	if err != nil {
		err := fmt.Errorf("failed to new seafile: %v", err)
		return err
	}
	err = fsmgr.SaveSeafile(mergeTestRepoID, file1)
	if err != nil {
		err := fmt.Errorf("failed to save seafile: %v", err)
		return err
	}
	dent1 := fsmgr.SeafDirent{ID: file1.FileID, Name: "testfile", Mode: modeFile, Size: 1}
	dir1, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent1})
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	dent2 := fsmgr.SeafDirent{ID: dir1, Name: "bbb", Mode: modeDir}
	dir2, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent2})
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	mergeTestTree1 = dir2
	commit1 := commitmgr.NewCommit(mergeTestRepoID, "", mergeTestTree1, "seafile", "this is the first commit.\n")
	err = commitmgr.Save(commit1)
	if err != nil {
		err := fmt.Errorf("failed to save commit: %v", err)
		return err
	}
	mergeTestTree1CommitID = commit1.CommitID
	// tree2: bbb/testfile (size 10)
	file2, err := fsmgr.NewSeafile(1, 10, []string{"4f616f98d6a264f75abffe1bc150019c880be239"})
	if err != nil {
		err := fmt.Errorf("failed to new seafile: %v", err)
		return err
	}
	err = fsmgr.SaveSeafile(mergeTestRepoID, file2)
	if err != nil {
		err := fmt.Errorf("failed to save seafile: %v", err)
		return err
	}
	dent3 := fsmgr.SeafDirent{ID: file2.FileID, Name: "testfile", Mode: modeFile, Size: 10}
	dir3, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent3})
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	dent4 := fsmgr.SeafDirent{ID: dir3, Name: "bbb", Mode: modeDir}
	dir4, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent4})
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	mergeTestTree2 = dir4
	commit2 := commitmgr.NewCommit(mergeTestRepoID, "", mergeTestTree2, "seafile", "this is the second commit.\n")
	err = commitmgr.Save(commit2)
	if err != nil {
		err := fmt.Errorf("failed to save commit: %v", err)
		return err
	}
	mergeTestTree2CommitID = commit2.CommitID
	// tree3: empty bbb directory
	dir5, err := mergeTestCreateSeafdir(nil)
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	dent6 := fsmgr.SeafDirent{ID: dir5, Name: "bbb", Mode: modeDir}
	dir6, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent6})
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	mergeTestTree3 = dir6
	commit3 := commitmgr.NewCommit(mergeTestRepoID, "", mergeTestTree3, "seafile", "this is the third commit.\n")
	err = commitmgr.Save(commit3)
	if err != nil {
		err := fmt.Errorf("failed to save commit: %v", err)
		return err
	}
	mergeTestTree3CommitID = commit3.CommitID
	// tree4: bbb/testfile (size 100, different block)
	file3, err := fsmgr.NewSeafile(1, 100, []string{"4f616f98d6a264f75abffe1bc150019c880be240"})
	if err != nil {
		err := fmt.Errorf("failed to new seafile: %v", err)
		return err
	}
	err = fsmgr.SaveSeafile(mergeTestRepoID, file3)
	if err != nil {
		err := fmt.Errorf("failed to save seafile: %v", err)
		return err
	}
	dent7 := fsmgr.SeafDirent{ID: file3.FileID, Name: "testfile", Mode: modeFile, Size: 100}
	dir7, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent7})
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	dent8 := fsmgr.SeafDirent{ID: dir7, Name: "bbb", Mode: modeDir}
	dir8, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent8})
	if err != nil {
		err := fmt.Errorf("failed to get seafdir: %v", err)
		return err
	}
	mergeTestTree4 = dir8
	// BUG FIX: the fourth commit previously pointed at mergeTestTree3
	// instead of mergeTestTree4, so mergeTestTree4CommitID referenced the
	// wrong root.
	commit4 := commitmgr.NewCommit(mergeTestRepoID, "", mergeTestTree4, "seafile", "this is the fourth commit.\n")
	err = commitmgr.Save(commit4)
	if err != nil {
		err := fmt.Errorf("failed to save commit: %v", err)
		return err
	}
	mergeTestTree4CommitID = commit4.CommitID
	return nil
}
// mergeTestCreateSeafdir stores a new version-1 seafdir containing the
// given dirents and returns its directory ID.
func mergeTestCreateSeafdir(dents []*fsmgr.SeafDirent) (string, error) {
	seafdir, err := fsmgr.NewSeafdir(1, dents)
	if err != nil {
		return "", fmt.Errorf("failed to new seafdir: %v", err)
	}
	if err := fsmgr.SaveSeafdir(mergeTestRepoID, seafdir); err != nil {
		return "", err
	}
	return seafdir.DirID, nil
}
// mergeTestDelFile removes the temporary seafile configuration directory
// created for the merge tests.
func mergeTestDelFile() error {
	return os.RemoveAll(mergeTestSeafileConfPath)
}
// TestMergeTrees sets up the commit/fs managers and the fixture trees, then
// runs the twelve three-way merge scenarios as subtests.
func TestMergeTrees(t *testing.T) {
	commitmgr.Init(mergeTestSeafileConfPath, mergeTestSeafileDataDir)
	fsmgr.Init(mergeTestSeafileConfPath, mergeTestSeafileDataDir, option.FsCacheLimit)
	err := mergeTestCreateTestDir()
	if err != nil {
		// BUG FIX: os.Exit(1) aborted the whole test binary, skipping any
		// other tests and the TestMain cleanup; t.Fatalf fails just this test.
		t.Fatalf("failed to create test dir: %v", err)
	}
	t.Run("test1", testMergeTrees1)
	t.Run("test2", testMergeTrees2)
	t.Run("test3", testMergeTrees3)
	t.Run("test4", testMergeTrees4)
	t.Run("test5", testMergeTrees5)
	t.Run("test6", testMergeTrees6)
	t.Run("test7", testMergeTrees7)
	t.Run("test8", testMergeTrees8)
	t.Run("test9", testMergeTrees9)
	t.Run("test10", testMergeTrees10)
	t.Run("test11", testMergeTrees11)
	t.Run("test12", testMergeTrees12)
	if err := mergeTestDelFile(); err != nil {
		t.Errorf("failed to remove test file : %v", err)
	}
}
// head add file
func testMergeTrees1(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID)
	if err != nil {
		// BUG FIX: t.Errorf continued and dereferenced a nil commit below.
		t.Fatalf("failed to load commit: %v", err)
	}
	// roots order {base, head, remote} — inferred from these test cases.
	roots := []string{mergeTestTree3, mergeTestTree2, mergeTestTree3}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	if err := mergeTrees(mergeTestRepoID, roots, opt); err != nil {
		// Fatal: opt.mergedRoot is meaningless after a failed merge.
		t.Fatalf("failed to merge: %v", err)
	}
	if opt.mergedRoot != mergeTestTree2 {
		t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
	}
}
// remote add file
func testMergeTrees2(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID)
	if err != nil {
		// BUG FIX: t.Errorf continued and dereferenced a nil commit below.
		t.Fatalf("failed to load commit: %v", err)
	}
	roots := []string{mergeTestTree3, mergeTestTree3, mergeTestTree2}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	if err := mergeTrees(mergeTestRepoID, roots, opt); err != nil {
		t.Fatalf("failed to merge: %v", err)
	}
	if opt.mergedRoot != mergeTestTree2 {
		t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
	}
}
// head modify file
func testMergeTrees3(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
	if err != nil {
		// BUG FIX: t.Errorf continued and dereferenced a nil commit below.
		t.Fatalf("failed to load commit: %v", err)
	}
	roots := []string{mergeTestTree1, mergeTestTree2, mergeTestTree1}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	if err := mergeTrees(mergeTestRepoID, roots, opt); err != nil {
		t.Fatalf("failed to merge: %v", err)
	}
	if opt.mergedRoot != mergeTestTree2 {
		t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
	}
}
// remote modify file
func testMergeTrees4(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
	if err != nil {
		// BUG FIX: t.Errorf continued and dereferenced a nil commit below.
		t.Fatalf("failed to load commit: %v", err)
	}
	roots := []string{mergeTestTree1, mergeTestTree1, mergeTestTree2}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	if err := mergeTrees(mergeTestRepoID, roots, opt); err != nil {
		t.Fatalf("failed to merge: %v", err)
	}
	if opt.mergedRoot != mergeTestTree2 {
		t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
	}
}
// head and remote add file
func testMergeTrees5(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID)
	if err != nil {
		// BUG FIX: t.Errorf continued and dereferenced a nil commit below.
		t.Fatalf("failed to load commit: %v", err)
	}
	roots := []string{mergeTestTree3, mergeTestTree1, mergeTestTree2}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	if err := mergeTrees(mergeTestRepoID, roots, opt); err != nil {
		t.Fatalf("failed to merge: %v", err)
	}
	// Both sides added different content: this must be detected as a conflict.
	if !opt.conflict {
		t.Errorf("merge error %s.\n", opt.mergedRoot)
	}
}
// head and remote modify file
func testMergeTrees6(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
	if err != nil {
		// BUG FIX: t.Errorf continued and dereferenced a nil commit below.
		t.Fatalf("failed to load commit: %v", err)
	}
	roots := []string{mergeTestTree1, mergeTestTree2, mergeTestTree4}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	if err := mergeTrees(mergeTestRepoID, roots, opt); err != nil {
		t.Fatalf("failed to merge: %v", err)
	}
	// Divergent modifications must be detected as a conflict.
	if !opt.conflict {
		t.Errorf("merge error %s.\n", opt.mergedRoot)
	}
}
// head modify file and remote delete file
func testMergeTrees7(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
	if err != nil {
		// BUG FIX: t.Errorf continued and dereferenced a nil commit below.
		t.Fatalf("failed to load commit: %v", err)
	}
	roots := []string{mergeTestTree1, mergeTestTree2, mergeTestTree3}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	if err := mergeTrees(mergeTestRepoID, roots, opt); err != nil {
		t.Fatalf("failed to merge: %v", err)
	}
	if opt.mergedRoot != mergeTestTree2 {
		t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
	}
}
// head delete file and remote modify file
func testMergeTrees8(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
	if err != nil {
		// BUG FIX: t.Errorf continued and dereferenced a nil commit below.
		t.Fatalf("failed to load commit: %v", err)
	}
	roots := []string{mergeTestTree1, mergeTestTree3, mergeTestTree2}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	if err := mergeTrees(mergeTestRepoID, roots, opt); err != nil {
		t.Fatalf("failed to merge: %v", err)
	}
	if opt.mergedRoot != mergeTestTree2 {
		t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
	}
}
// head modify file and remote delete dir of this file
func testMergeTrees9(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
	if err != nil {
		// BUG FIX: t.Errorf continued and dereferenced a nil commit below.
		t.Fatalf("failed to load commit: %v", err)
	}
	roots := []string{mergeTestTree1, mergeTestTree2, mergeTestTree5}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	if err := mergeTrees(mergeTestRepoID, roots, opt); err != nil {
		t.Fatalf("failed to merge: %v", err)
	}
	if opt.mergedRoot != mergeTestTree2 {
		t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
	}
}
// remote modify file and head delete dir of this file
func testMergeTrees10(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)
	if err != nil {
		// BUG FIX: t.Errorf continued and dereferenced a nil commit below.
		t.Fatalf("failed to load commit: %v", err)
	}
	roots := []string{mergeTestTree1, mergeTestTree5, mergeTestTree2}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	if err := mergeTrees(mergeTestRepoID, roots, opt); err != nil {
		t.Fatalf("failed to merge: %v", err)
	}
	if opt.mergedRoot != mergeTestTree2 {
		t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree2)
	}
}
// head add file and remote delete dir of this file
func testMergeTrees11(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID)
	if err != nil {
		// BUG FIX: t.Errorf continued and dereferenced a nil commit below.
		t.Fatalf("failed to load commit: %v", err)
	}
	roots := []string{mergeTestTree3, mergeTestTree1, mergeTestTree5}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	if err := mergeTrees(mergeTestRepoID, roots, opt); err != nil {
		t.Fatalf("failed to merge: %v", err)
	}
	if opt.mergedRoot != mergeTestTree1 {
		t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree1)
	}
}
// remote add file and head delete dir of this file
func testMergeTrees12(t *testing.T) {
	commit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID)
	if err != nil {
		// BUG FIX: t.Errorf continued and dereferenced a nil commit below.
		t.Fatalf("failed to load commit: %v", err)
	}
	roots := []string{mergeTestTree3, mergeTestTree5, mergeTestTree1}
	opt := new(mergeOptions)
	opt.remoteRepoID = mergeTestRepoID
	opt.remoteHead = commit.CommitID
	if err := mergeTrees(mergeTestRepoID, roots, opt); err != nil {
		t.Fatalf("failed to merge: %v", err)
	}
	if opt.mergedRoot != mergeTestTree1 {
		t.Errorf("merge error %s/%s.\n", opt.mergedRoot, mergeTestTree1)
	}
}
================================================
FILE: fileserver/metrics/metrics.go
================================================
package metrics
import (
"container/list"
"context"
"encoding/json"
"fmt"
"net/http"
"runtime/debug"
"sync"
"time"
"github.com/dgraph-io/ristretto/z"
"github.com/go-redis/redis/v8"
"github.com/haiwen/seafile-server/fileserver/option"
log "github.com/sirupsen/logrus"
)
const (
	// RedisChannel is the redis pub/sub channel metrics are published on.
	RedisChannel = "metric_channel"
	// ComponentName identifies this component in metric messages.
	ComponentName = "go_fileserver"
	// MetricInterval is how often metrics are published.
	MetricInterval = 30 * time.Second
)
type MetricMgr struct {
sync.Mutex
inFlightRequestList *list.List
}
type RequestInfo struct {
urlPath string
method string
start time.Time
}
func (m *MetricMgr) AddReq(urlPath, method string) *list.Element {
req := new(RequestInfo)
req.urlPath = urlPath
req.method = method
req.start = time.Now()
m.Lock()
defer m.Unlock()
e := m.inFlightRequestList.PushBack(req)
return e
}
func (m *MetricMgr) DecReq(e *list.Element) {
m.Lock()
defer m.Unlock()
m.inFlightRequestList.Remove(e)
}
var (
	// client is the redis connection used to publish metrics.
	client *redis.Client
	// closer coordinates shutdown of the metrics goroutine.
	closer *z.Closer
	// metricMgr holds the collected metric state; nil unless Init ran.
	metricMgr *MetricMgr
)
// Init starts metrics collection. It is a no-op unless redis options are
// configured, since metrics are delivered over a redis channel.
func Init() {
	if !option.HasRedisOptions {
		return
	}
	mgr := new(MetricMgr)
	mgr.inFlightRequestList = list.New()
	metricMgr = mgr
	closer = z.NewCloser(1)
	go metricsHandler()
}
// Stop signals the metrics goroutine to exit and waits for it; a no-op
// when metrics were never started (no redis options).
func Stop() {
	if option.HasRedisOptions {
		closer.SignalAndWait()
	}
}
// metricsHandler runs in its own goroutine: it connects to redis and
// publishes the collected metrics every MetricInterval until Stop()
// signals the closer.
func metricsHandler() {
	defer closer.Done()
	// Recover so a panic in the metrics loop cannot take down the server.
	defer func() {
		if err := recover(); err != nil {
			log.Errorf("panic: %v\n%s", err, debug.Stack())
		}
	}()
	server := fmt.Sprintf("%s:%d", option.RedisHost, option.RedisPort)
	opt := &redis.Options{
		Addr:     server,
		Password: option.RedisPasswd,
	}
	// One connection suffices: metrics are published sequentially.
	opt.PoolSize = 1
	client = redis.NewClient(opt)
	ticker := time.NewTicker(MetricInterval)
	defer ticker.Stop()
	for {
		select {
		case <-closer.HasBeenClosed():
			return
		case <-ticker.C:
			err := publishMetrics()
			if err != nil {
				// Publish failures are logged and retried on the next tick.
				log.Warnf("Failed to publish metrics to redis channel: %v", err)
				continue
			}
		}
	}
}
// MetricMiddleware wraps an http.Handler so each request is tracked in the
// in-flight request list for the duration of its handling.
func MetricMiddleware(next http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		elem := metricMgr.AddReq(r.URL.Path, r.Method)
		next.ServeHTTP(w, r)
		metricMgr.DecReq(elem)
	}
	return http.HandlerFunc(fn)
}
// MetricMessage is the JSON payload published for a single metric.
type MetricMessage struct {
	MetricName    string `json:"metric_name"`
	MetricValue   any    `json:"metric_value"`
	MetricType    string `json:"metric_type"`
	ComponentName string `json:"component_name"`
	MetricHelp    string `json:"metric_help"`
	NodeName      string `json:"node_name"`
}
// publishMetrics snapshots the current metric values and publishes them as
// a JSON message on the redis metrics channel.
func publishMetrics() error {
	// Hold the lock only long enough to read the gauge value.
	metricMgr.Lock()
	inFlightRequestCount := metricMgr.inFlightRequestList.Len()
	metricMgr.Unlock()
	msg := &MetricMessage{MetricName: "in_flight_request_total",
		MetricValue:   inFlightRequestCount,
		MetricType:    "gauge",
		ComponentName: ComponentName,
		MetricHelp:    "The number of currently running http requests.",
		NodeName:      option.NodeName,
	}
	data, err := json.Marshal(msg)
	if err != nil {
		return err
	}
	err = publishRedisMsg(RedisChannel, data)
	if err != nil {
		return err
	}
	return nil
}
// publishRedisMsg publishes msg on the given redis channel, bounded by a
// 5-second timeout.
func publishRedisMsg(channel string, msg []byte) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	err := client.Publish(ctx, channel, msg).Err()
	if err != nil {
		return fmt.Errorf("failed to publish redis message: %w", err)
	}
	return nil
}
================================================
FILE: fileserver/objstore/backend_fs.go
================================================
// Implementation of file system storage backend.
package objstore
import (
"io"
"os"
"path"
)
// fsBackend stores each object as a file laid out as
// <objDir>/<repoID>/<objID[:2]>/<objID[2:]>.
type fsBackend struct {
	// Path of the object directory
	objDir string
	// Object type name; write() stages "blocks" in tmpDir and everything
	// else next to its final location.
	objType string
	// Shared staging directory for block writes.
	tmpDir string
}
// newFSBackend prepares the on-disk layout for one object type: the object
// directory under <dataDir>/storage/<objType> plus a shared tmp directory.
func newFSBackend(seafileDataDir string, objType string) (*fsBackend, error) {
	objDir := path.Join(seafileDataDir, "storage", objType)
	if err := os.MkdirAll(objDir, os.ModePerm); err != nil {
		return nil, err
	}
	tmpDir := path.Join(seafileDataDir, "tmpfiles")
	if err := os.MkdirAll(tmpDir, os.ModePerm); err != nil {
		return nil, err
	}
	return &fsBackend{
		objDir:  objDir,
		objType: objType,
		tmpDir:  tmpDir,
	}, nil
}
// read streams the stored object's contents into w.
func (b *fsBackend) read(repoID string, objID string, w io.Writer) error {
	objPath := path.Join(b.objDir, repoID, objID[:2], objID[2:])
	fd, err := os.Open(objPath)
	if err != nil {
		return err
	}
	defer fd.Close()
	if _, err := io.Copy(w, fd); err != nil {
		return err
	}
	return nil
}
// write stores an object atomically: contents are first written to a temp
// file, then renamed into place so readers never observe a partial object.
// When sync is true the data is flushed to stable storage before the rename.
func (b *fsBackend) write(repoID string, objID string, r io.Reader, sync bool) error {
	parentDir := path.Join(b.objDir, repoID, objID[:2])
	p := path.Join(parentDir, objID[2:])
	err := os.MkdirAll(parentDir, os.ModePerm)
	if err != nil {
		return err
	}
	// Blocks are staged in the shared tmp dir; other object types are
	// staged next to their final location.
	tmpDir := b.tmpDir
	if b.objType != "blocks" {
		tmpDir = parentDir
	}
	tFile, err := os.CreateTemp(tmpDir, objID+".*")
	if err != nil {
		return err
	}
	success := false
	defer func() {
		// Remove the temp file on any failure path.
		if !success {
			os.Remove(tFile.Name())
		}
	}()
	if _, err := io.Copy(tFile, r); err != nil {
		tFile.Close()
		return err
	}
	// BUG FIX: the sync parameter was previously ignored; honor it by
	// fsync-ing the temp file before it is renamed into place.
	if sync {
		if err := tFile.Sync(); err != nil {
			tFile.Close()
			return err
		}
	}
	if err := tFile.Close(); err != nil {
		return err
	}
	if err := os.Rename(tFile.Name(), p); err != nil {
		return err
	}
	success = true
	return nil
}
// exists reports whether the object is present on disk.
// BUG FIX: the original returned a non-nil error for the ordinary
// not-found case and reported true when Stat failed for any other reason.
// Now: not found -> (false, nil); other stat errors -> (false, err);
// found -> (true, nil).
func (b *fsBackend) exists(repoID string, objID string) (bool, error) {
	objPath := path.Join(b.objDir, repoID, objID[:2], objID[2:])
	_, err := os.Stat(objPath)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}
	return true, nil
}
// stat returns the object's size in bytes, or -1 with an error.
func (b *fsBackend) stat(repoID string, objID string) (int64, error) {
	objPath := path.Join(b.objDir, repoID, objID[:2], objID[2:])
	info, err := os.Stat(objPath)
	if err != nil {
		return -1, err
	}
	return info.Size(), nil
}
================================================
FILE: fileserver/objstore/objstore.go
================================================
// Package objstore provides operations for commit, fs and block objects.
// It is low-level package used by commitmgr, fsmgr, blockmgr packages to access storage.
package objstore
import (
"io"
)
// ObjectStore is a container to access storage backend
type ObjectStore struct {
	// can be "commit", "fs", or "block"
	ObjType string
	// backend performs the actual reads/writes; currently always the
	// file-system backend created by New.
	backend storageBackend
}
// storageBackend is the interface implemented by storage backends.
// An object store may have one or multiple storage backends.
type storageBackend interface {
	// Read an object from backend and write the contents into w.
	read(repoID string, objID string, w io.Writer) (err error)
	// Write the contents from r to the object; sync requests a flush to
	// stable storage before the write is considered complete.
	write(repoID string, objID string, r io.Reader, sync bool) (err error)
	// exists checks whether an object exists.
	exists(repoID string, objID string) (res bool, err error)
	// stat calculates an object's size
	stat(repoID string, objID string) (res int64, err error)
}
// New returns a new object store for a given type of objects.
// objType can be "commit", "fs", or "block".
func New(seafileConfPath string, seafileDataDir string, objType string) *ObjectStore {
	obj := new(ObjectStore)
	obj.ObjType = objType
	// NOTE(review): the error from newFSBackend is discarded; if backend
	// creation fails (e.g. the data dir is unwritable) obj.backend stays
	// nil and later method calls panic. Consider surfacing this error.
	obj.backend, _ = newFSBackend(seafileDataDir, objType)
	return obj
}
// Read data from storage backends, copying the object's contents into w.
func (s *ObjectStore) Read(repoID string, objID string, w io.Writer) (err error) {
	return s.backend.read(repoID, objID, w)
}
// Write data to storage backends, reading the object's contents from r.
func (s *ObjectStore) Write(repoID string, objID string, r io.Reader, sync bool) (err error) {
	return s.backend.write(repoID, objID, r, sync)
}
// Check whether object exists in the storage backend.
func (s *ObjectStore) Exists(repoID string, objID string) (res bool, err error) {
	return s.backend.exists(repoID, objID)
}
// Stat calculates object size in bytes.
func (s *ObjectStore) Stat(repoID string, objID string) (res int64, err error) {
	return s.backend.stat(repoID, objID)
}
================================================
FILE: fileserver/objstore/objstore_test.go
================================================
package objstore
import (
"fmt"
"os"
"path"
"testing"
)
const (
	// testFile is the fixture file written/read by the tests.
	testFile        = "output.data"
	seafileConfPath = "/tmp/conf"
	seafileDataDir  = "/tmp/conf/seafile-data"
	// repoID/objID identify the single object used by all tests.
	repoID = "b1f2ad61-9164-418a-a47f-ab805dbd5694"
	objID  = "0401fc662e3bc87a41f299a907c056aaf8322a27"
)
// createFile writes the fixture file: "hello world!\n" (13 bytes) repeated
// ten times, 130 bytes total (testExists depends on this size).
func createFile() error {
	outputFile, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
	if err != nil {
		return err
	}
	defer outputFile.Close()
	outputString := "hello world!\n"
	for i := 0; i < 10; i++ {
		// BUG FIX: WriteString errors were silently discarded.
		if _, err := outputFile.WriteString(outputString); err != nil {
			return err
		}
	}
	return nil
}
// delFile removes the fixture file and the temporary seafile configuration
// directory.
func delFile() error {
	if err := os.Remove(testFile); err != nil {
		return err
	}
	return os.RemoveAll(seafileConfPath)
}
// TestMain creates the shared fixture file, runs the objstore tests, and
// cleans up afterwards.
func TestMain(m *testing.M) {
	if err := createFile(); err != nil {
		fmt.Printf("Failed to create test file : %v\n", err)
		os.Exit(1)
	}
	code := m.Run()
	if err := delFile(); err != nil {
		fmt.Printf("Failed to remove test file : %v\n", err)
		os.Exit(1)
	}
	os.Exit(code)
}
// testWrite stores the fixture file's contents into the commit object store.
func testWrite(t *testing.T) {
	inputFile, err := os.Open(testFile)
	if err != nil {
		// BUG FIX: t.Errorf continued with a nil file handle.
		t.Fatalf("Failed to open test file : %v\n", err)
	}
	defer inputFile.Close()
	bend := New(seafileConfPath, seafileDataDir, "commit")
	// BUG FIX: the Write error was silently discarded.
	if err := bend.Write(repoID, objID, inputFile, true); err != nil {
		t.Errorf("Failed to write backend : %v\n", err)
	}
}
// testRead reads the stored object back into the fixture file.
func testRead(t *testing.T) {
	outputFile, err := os.OpenFile(testFile, os.O_WRONLY, 0666)
	if err != nil {
		// BUG FIX: t.Errorf continued with a nil file handle.
		t.Fatalf("Failed to open test file:%v\n", err)
	}
	defer outputFile.Close()
	bend := New(seafileConfPath, seafileDataDir, "commit")
	if err := bend.Read(repoID, objID, outputFile); err != nil {
		t.Errorf("Failed to read backend : %s\n", err)
	}
}
// testExists verifies the object exists on disk and has the expected size.
func testExists(t *testing.T) {
	bend := New(seafileConfPath, seafileDataDir, "commit")
	ret, _ := bend.Exists(repoID, objID)
	if !ret {
		t.Errorf("File is not exist\n")
	}
	filePath := path.Join(seafileDataDir, "storage", "commit", repoID, objID[:2], objID[2:])
	// BUG FIX: the Stat error was ignored; a failed Stat left fileInfo nil
	// and the Size() call below panicked.
	fileInfo, err := os.Stat(filePath)
	if err != nil {
		t.Fatalf("Failed to stat file : %v\n", err)
	}
	// The fixture is "hello world!\n" (13 bytes) x 10 = 130 bytes.
	if fileInfo.Size() != 130 {
		t.Errorf("File is exist, but the size of file is incorrect.\n")
	}
}
// TestObjStore runs the write/read/exists checks in order; testRead and
// testExists depend on the object created by testWrite.
func TestObjStore(t *testing.T) {
	testWrite(t)
	testRead(t)
	testExists(t)
}
================================================
FILE: fileserver/option/option.go
================================================
package option
import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
log "github.com/sirupsen/logrus"
"gopkg.in/ini.v1"
)
// InfiniteQuota indicates that the quota is unlimited.
const InfiniteQuota = -2

// Storage unit.
const (
	KB = 1000 // decimal units, matching quota strings like "10gb"
	MB = 1000000
	GB = 1000000000
	TB = 1000000000000
)
var (
	// fileserver options
	Host                   string
	Port                   uint32
	MaxUploadSize          uint64 // bytes
	FsIdListRequestTimeout int64
	// Block size for indexing uploaded files
	FixedBlockSize uint64
	// Maximum number of goroutines to index uploaded files
	MaxIndexingThreads uint32
	WebTokenExpireTime uint32
	// File mode for temp files
	ClusterSharedTempFileMode uint32
	WindowsEncoding           string
	SkipBlockHash             bool
	FsCacheLimit              int64 // bytes
	VerifyClientBlocks        bool
	MaxIndexingFiles          uint32
	// general options
	CloudMode bool
	// notification server
	EnableNotification bool
	NotificationURL    string
	// GROUP options
	GroupTableName string
	// quota options
	DefaultQuota int64
	// redis options
	HasRedisOptions bool
	RedisHost       string
	RedisPasswd     string
	RedisPort       uint32
	RedisExpiry     uint32
	RedisMaxConn    uint32
	RedisTimeout    time.Duration
	// Profile password
	ProfilePassword string
	EnableProfiling bool
	// Go log level
	LogLevel string
	// DB default timeout
	DBOpTimeout time.Duration
	// database
	DBType string
	// seahub
	SeahubURL     string
	JWTPrivateKey string
	// metric
	NodeName string
)
// DBOption holds the mysql connection settings resolved from seafile.conf
// and/or the SEAFILE_MYSQL_DB_* environment variables.
type DBOption struct {
	User          string
	Password      string
	Host          string
	Port          int
	CcnetDbName   string
	SeafileDbName string
	// TLS settings for the database connection.
	CaPath     string
	UseTLS     bool
	SkipVerify bool
	Charset    string
	// DBEngine is the database type; only "mysql" is accepted.
	DBEngine string
}
// initDefaultOptions resets all tunable options to their built-in
// defaults; called before the config file is parsed.
func initDefaultOptions() {
	Host = "0.0.0.0"
	Port = 8082
	FixedBlockSize = 1 << 23 // 8 MiB
	MaxIndexingThreads = 1
	WebTokenExpireTime = 7200
	ClusterSharedTempFileMode = 0600
	DefaultQuota = InfiniteQuota
	FsCacheLimit = 4 << 30 // 4 GiB
	VerifyClientBlocks = true
	FsIdListRequestTimeout = -1
	DBOpTimeout = 60 * time.Second
	RedisHost = "127.0.0.1"
	RedisPort = 6379
	RedisExpiry = 24 * 3600 // seconds
	RedisMaxConn = 100
	RedisTimeout = 1 * time.Second
	MaxIndexingFiles = 10
}
// LoadFileServerOptions loads fileserver settings from seafile.conf in
// centralDir plus a handful of environment variables, starting from the
// built-in defaults.
func LoadFileServerOptions(centralDir string) {
	initDefaultOptions()
	seafileConfPath := filepath.Join(centralDir, "seafile.conf")
	opts := ini.LoadOptions{}
	opts.SpaceBeforeInlineComment = true
	config, err := ini.LoadSources(opts, seafileConfPath)
	if err != nil {
		log.Fatalf("Failed to load seafile.conf: %v", err)
	}
	CloudMode = false
	if section, err := config.GetSection("general"); err == nil {
		if key, err := section.GetKey("cloud_mode"); err == nil {
			CloudMode, _ = key.Bool()
		}
	}
	// The notification server is enabled only when both env vars are set.
	notifServer := os.Getenv("INNER_NOTIFICATION_SERVER_URL")
	enableNotifServer := os.Getenv("ENABLE_NOTIFICATION_SERVER")
	if notifServer != "" && enableNotifServer == "true" {
		NotificationURL = notifServer
		EnableNotification = true
	}
	// [httpserver] is the legacy section name; [fileserver] values override
	// it when both are present.
	if section, err := config.GetSection("httpserver"); err == nil {
		parseFileServerSection(section)
	}
	if section, err := config.GetSection("fileserver"); err == nil {
		parseFileServerSection(section)
	}
	if section, err := config.GetSection("quota"); err == nil {
		if key, err := section.GetKey("default"); err == nil {
			quotaStr := key.String()
			DefaultQuota = parseQuota(quotaStr)
		}
	}
	loadCacheOptionFromEnv()
	GroupTableName = os.Getenv("SEAFILE_MYSQL_DB_GROUP_TABLE_NAME")
	if GroupTableName == "" {
		GroupTableName = "Group"
	}
	NodeName = os.Getenv("NODE_NAME")
	if NodeName == "" {
		NodeName = "default"
	}
}
// parseFileServerSection applies one [fileserver] (or legacy [httpserver])
// section of seafile.conf to the package-level option variables. Keys that
// are absent or fail to parse leave the current values untouched.
func parseFileServerSection(section *ini.Section) {
	if key, err := section.GetKey("host"); err == nil {
		Host = key.String()
	}
	if key, err := section.GetKey("port"); err == nil {
		port, err := key.Uint()
		if err == nil {
			Port = uint32(port)
		}
	}
	if key, err := section.GetKey("max_upload_size"); err == nil {
		size, err := key.Uint()
		if err == nil {
			// configured in (decimal) MB, stored in bytes
			MaxUploadSize = uint64(size) * 1000000
		}
	}
	if key, err := section.GetKey("max_indexing_threads"); err == nil {
		threads, err := key.Uint()
		if err == nil {
			MaxIndexingThreads = uint32(threads)
		}
	}
	if key, err := section.GetKey("fixed_block_size"); err == nil {
		blkSize, err := key.Uint64()
		if err == nil {
			// configured in MiB, stored in bytes
			FixedBlockSize = blkSize * (1 << 20)
		}
	}
	if key, err := section.GetKey("web_token_expire_time"); err == nil {
		expire, err := key.Uint()
		if err == nil {
			WebTokenExpireTime = uint32(expire)
		}
	}
	if key, err := section.GetKey("cluster_shared_temp_file_mode"); err == nil {
		fileMode, err := key.Uint()
		if err == nil {
			ClusterSharedTempFileMode = uint32(fileMode)
		}
	}
	if key, err := section.GetKey("enable_profiling"); err == nil {
		EnableProfiling, _ = key.Bool()
	}
	if EnableProfiling {
		// Profiling without a password would expose the endpoint; refuse to
		// start in that case.
		if key, err := section.GetKey("profile_password"); err == nil {
			ProfilePassword = key.String()
		} else {
			log.Fatal("password of profiling must be specified.")
		}
	}
	if key, err := section.GetKey("go_log_level"); err == nil {
		LogLevel = key.String()
	}
	if key, err := section.GetKey("fs_cache_limit"); err == nil {
		fsCacheLimit, err := key.Int64()
		if err == nil {
			// configured in MiB, stored in bytes
			FsCacheLimit = fsCacheLimit * 1024 * 1024
		}
	}
	// The ratio of physical memory consumption and fs objects is about 4:1,
	// and this part of memory is generally not subject to GC. So the value is
	// divided by 4.
	// NOTE(review): this division runs on every call, and the caller invokes
	// this function for both [httpserver] and [fileserver] sections — a
	// config with both sections divides the limit twice. Confirm intended.
	FsCacheLimit = FsCacheLimit / 4
	if key, err := section.GetKey("fs_id_list_request_timeout"); err == nil {
		fsIdListRequestTimeout, err := key.Int64()
		if err == nil {
			FsIdListRequestTimeout = fsIdListRequestTimeout
		}
	}
	if key, err := section.GetKey("verify_client_blocks_after_sync"); err == nil {
		VerifyClientBlocks, _ = key.Bool()
	}
	if key, err := section.GetKey("max_indexing_files"); err == nil {
		threads, err := key.Uint()
		if err == nil && threads > 0 {
			MaxIndexingFiles = uint32(threads)
		}
	}
}
func parseQuota(quotaStr string) int64 {
var quota int64
var multiplier int64 = GB
if end := strings.Index(quotaStr, "kb"); end > 0 {
multiplier = KB
quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0)
if err != nil {
return InfiniteQuota
}
quota = quotaInt * multiplier
} else if end := strings.Index(quotaStr, "mb"); end > 0 {
multiplier = MB
quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0)
if err != nil {
return InfiniteQuota
}
quota = quotaInt * multiplier
} else if end := strings.Index(quotaStr, "gb"); end > 0 {
multiplier = GB
quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0)
if err != nil {
return InfiniteQuota
}
quota = quotaInt * multiplier
} else if end := strings.Index(quotaStr, "tb"); end > 0 {
multiplier = TB
quotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0)
if err != nil {
return InfiniteQuota
}
quota = quotaInt * multiplier
} else {
quotaInt, err := strconv.ParseInt(quotaStr, 10, 0)
if err != nil {
return InfiniteQuota
}
quota = quotaInt * multiplier
}
return quota
}
// loadCacheOptionFromEnv reads redis connection options from the
// environment when CACHE_PROVIDER=redis is configured; otherwise the
// defaults from initDefaultOptions stand.
func loadCacheOptionFromEnv() {
	cacheProvider := os.Getenv("CACHE_PROVIDER")
	if cacheProvider != "redis" {
		return
	}
	HasRedisOptions = true
	redisHost := os.Getenv("REDIS_HOST")
	if redisHost != "" {
		RedisHost = redisHost
	}
	// BUG FIX: the numeric options below were applied only when parsing
	// FAILED (err != nil), which assigned zero and discarded valid values.
	// They are now applied when parsing succeeds.
	redisPort := os.Getenv("REDIS_PORT")
	if redisPort != "" {
		port, err := strconv.ParseUint(redisPort, 10, 32)
		if err == nil {
			RedisPort = uint32(port)
		}
	}
	redisPasswd := os.Getenv("REDIS_PASSWORD")
	if redisPasswd != "" {
		RedisPasswd = redisPasswd
	}
	redisMaxConn := os.Getenv("REDIS_MAX_CONNECTIONS")
	if redisMaxConn != "" {
		maxConn, err := strconv.ParseUint(redisMaxConn, 10, 32)
		if err == nil {
			RedisMaxConn = uint32(maxConn)
		}
	}
	redisExpiry := os.Getenv("REDIS_EXPIRY")
	if redisExpiry != "" {
		expiry, err := strconv.ParseUint(redisExpiry, 10, 32)
		if err == nil {
			RedisExpiry = uint32(expiry)
		}
	}
}
// LoadSeahubConfig reads the JWT key and the seahub internal API URL from
// the environment. JWT_PRIVATE_KEY is mandatory; SITE_ROOT is optional and
// defaults to "/".
func LoadSeahubConfig() error {
	JWTPrivateKey = os.Getenv("JWT_PRIVATE_KEY")
	if JWTPrivateKey == "" {
		return fmt.Errorf("failed to read JWT_PRIVATE_KEY")
	}
	if siteRoot := os.Getenv("SITE_ROOT"); siteRoot != "" {
		SeahubURL = fmt.Sprintf("http://127.0.0.1:8000%sapi/v2.1/internal", siteRoot)
	} else {
		SeahubURL = "http://127.0.0.1:8000/api/v2.1/internal"
	}
	return nil
}
// LoadDBOption resolves database settings from seafile.conf, overlays any
// environment variables, and validates the required fields.
func LoadDBOption(centralDir string) (*DBOption, error) {
	fileOpt, err := loadDBOptionFromFile(centralDir)
	if err != nil {
		log.Warnf("failed to load database config: %v", err)
	}
	dbOpt := loadDBOptionFromEnv(fileOpt)
	switch {
	case dbOpt.Host == "":
		return nil, fmt.Errorf("no database host in seafile.conf.")
	case dbOpt.User == "":
		return nil, fmt.Errorf("no database user in seafile.conf.")
	case dbOpt.Password == "":
		return nil, fmt.Errorf("no database password in seafile.conf.")
	}
	DBType = dbOpt.DBEngine
	return dbOpt, nil
}
// loadDBOptionFromFile reads the [database] section of seafile.conf in
// centralDir. A missing section is not fatal: a default (mysql) DBOption is
// returned for the environment overlay to fill in.
func loadDBOptionFromFile(centralDir string) (*DBOption, error) {
	dbOpt := new(DBOption)
	dbOpt.DBEngine = "mysql"
	seafileConfPath := filepath.Join(centralDir, "seafile.conf")
	opts := ini.LoadOptions{}
	opts.SpaceBeforeInlineComment = true
	config, err := ini.LoadSources(opts, seafileConfPath)
	if err != nil {
		return nil, fmt.Errorf("failed to load seafile.conf: %v", err)
	}
	section, err := config.GetSection("database")
	if err != nil {
		// No [database] section: fall back to defaults / environment.
		return dbOpt, nil
	}
	dbEngine := "mysql"
	key, err := section.GetKey("type")
	if err == nil {
		dbEngine = key.String()
	}
	// Only mysql is supported here.
	if dbEngine != "mysql" {
		return nil, fmt.Errorf("unsupported database %s.", dbEngine)
	}
	dbOpt.DBEngine = dbEngine
	if key, err = section.GetKey("host"); err == nil {
		dbOpt.Host = key.String()
	}
	// user is required.
	if key, err = section.GetKey("user"); err == nil {
		dbOpt.User = key.String()
	}
	if key, err = section.GetKey("password"); err == nil {
		dbOpt.Password = key.String()
	}
	if key, err = section.GetKey("db_name"); err == nil {
		dbOpt.SeafileDbName = key.String()
	}
	port := 3306
	if key, err = section.GetKey("port"); err == nil {
		port, _ = key.Int()
	}
	dbOpt.Port = port
	useTLS := false
	if key, err = section.GetKey("use_ssl"); err == nil {
		useTLS, _ = key.Bool()
	}
	dbOpt.UseTLS = useTLS
	skipVerify := false
	if key, err = section.GetKey("skip_verify"); err == nil {
		skipVerify, _ = key.Bool()
	}
	dbOpt.SkipVerify = skipVerify
	if key, err = section.GetKey("ca_path"); err == nil {
		dbOpt.CaPath = key.String()
	}
	if key, err = section.GetKey("connection_charset"); err == nil {
		dbOpt.Charset = key.String()
	}
	return dbOpt, nil
}
// loadDBOptionFromEnv overlays the SEAFILE_MYSQL_DB_* environment
// variables on dbOpt (allocating it when nil) and fills in defaults for
// the port and database names.
func loadDBOptionFromEnv(dbOpt *DBOption) *DBOption {
	if dbOpt == nil {
		dbOpt = new(DBOption)
	}
	if v := os.Getenv("SEAFILE_MYSQL_DB_USER"); v != "" {
		dbOpt.User = v
	}
	if v := os.Getenv("SEAFILE_MYSQL_DB_PASSWORD"); v != "" {
		dbOpt.Password = v
	}
	if v := os.Getenv("SEAFILE_MYSQL_DB_HOST"); v != "" {
		dbOpt.Host = v
	}
	if portStr := os.Getenv("SEAFILE_MYSQL_DB_PORT"); portStr != "" {
		port, _ := strconv.ParseUint(portStr, 10, 32)
		if port > 0 {
			dbOpt.Port = int(port)
		}
	}
	if dbOpt.Port == 0 {
		dbOpt.Port = 3306
	}
	if v := os.Getenv("SEAFILE_MYSQL_DB_CCNET_DB_NAME"); v != "" {
		dbOpt.CcnetDbName = v
	} else if dbOpt.CcnetDbName == "" {
		dbOpt.CcnetDbName = "ccnet_db"
		log.Infof("Failed to read SEAFILE_MYSQL_DB_CCNET_DB_NAME, use ccnet_db by default")
	}
	if v := os.Getenv("SEAFILE_MYSQL_DB_SEAFILE_DB_NAME"); v != "" {
		dbOpt.SeafileDbName = v
	} else if dbOpt.SeafileDbName == "" {
		dbOpt.SeafileDbName = "seafile_db"
		log.Infof("Failed to read SEAFILE_MYSQL_DB_SEAFILE_DB_NAME, use seafile_db by default")
	}
	return dbOpt
}
================================================
FILE: fileserver/quota.go
================================================
package main
import (
"context"
"database/sql"
"fmt"
"github.com/haiwen/seafile-server/fileserver/option"
"github.com/haiwen/seafile-server/fileserver/repomgr"
)
// InfiniteQuota indicates that the quota is unlimited.
// checkQuota treats this sentinel as "never over quota".
const (
	InfiniteQuota = -2
)
// checkQuota checks whether adding delta bytes of data to the repo would
// exceed its owner's quota. It returns 0 when the usage stays within quota,
// 1 when the quota would be exceeded, and -1 together with an error when any
// lookup fails.
func checkQuota(repoID string, delta int64) (int, error) {
	if repoID == "" {
		// Fixed typo in error message ("argumets" -> "arguments").
		err := fmt.Errorf("bad arguments")
		return -1, err
	}
	vInfo, err := repomgr.GetVirtualRepoInfo(repoID)
	if err != nil {
		err := fmt.Errorf("failed to get virtual repo: %v", err)
		return -1, err
	}
	// Quota is always accounted against the origin repo of a virtual repo.
	rRepoID := repoID
	if vInfo != nil {
		rRepoID = vInfo.OriginRepoID
	}
	user, err := repomgr.GetRepoOwner(rRepoID)
	if err != nil {
		err := fmt.Errorf("failed to get repo owner: %v", err)
		return -1, err
	}
	if user == "" {
		err := fmt.Errorf("repo %s has no owner", repoID)
		return -1, err
	}
	quota, err := getUserQuota(user)
	if err != nil {
		err := fmt.Errorf("failed to get user quota: %v", err)
		return -1, err
	}
	if quota == InfiniteQuota {
		return 0, nil
	}
	usage, err := getUserUsage(user)
	if err != nil || usage < 0 {
		err := fmt.Errorf("failed to get user usage: %v", err)
		return -1, err
	}
	usage += delta
	if usage >= quota {
		return 1, nil
	}
	return 0, nil
}
func getUserQuota(user string) (int64, error) {
var quota int64
sqlStr := "SELECT quota FROM UserQuota WHERE user=?"
ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
defer cancel()
row := seafileDB.QueryRowContext(ctx, sqlStr, user)
if err := row.Scan("a); err != nil {
if err != sql.ErrNoRows {
return -1, err
}
}
if quota <= 0 {
quota = option.DefaultQuota
}
return quota, nil
}
// getUserUsage returns the total size (in bytes) of all repos owned by user.
// Virtual repos are excluded via the LEFT JOIN / IS NULL filter. A user that
// owns no repos yields zero usage.
func getUserUsage(user string) (int64, error) {
	query := "SELECT SUM(size) FROM " +
		"RepoOwner o LEFT JOIN VirtualRepo v ON o.repo_id=v.repo_id, " +
		"RepoSize WHERE " +
		"owner_id=? AND o.repo_id=RepoSize.repo_id " +
		"AND v.repo_id IS NULL"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	var total sql.NullInt64
	err := seafileDB.QueryRowContext(ctx, query, user).Scan(&total)
	if err != nil && err != sql.ErrNoRows {
		return -1, err
	}
	if !total.Valid {
		// SUM over zero rows is NULL; treat it as zero usage.
		return 0, nil
	}
	return total.Int64, nil
}
================================================
FILE: fileserver/repomgr/repomgr.go
================================================
// Package repomgr manages repo objects and file operations in repos.
package repomgr
import (
"context"
"database/sql"
"fmt"
"time"
// Change to non-blank imports when use
_ "github.com/haiwen/seafile-server/fileserver/blockmgr"
"github.com/haiwen/seafile-server/fileserver/commitmgr"
"github.com/haiwen/seafile-server/fileserver/option"
log "github.com/sirupsen/logrus"
)
// Repo status
const (
	RepoStatusNormal   = iota // repo is fully accessible
	RepoStatusReadOnly        // repo rejects modifications
	NRepoStatus               // number of defined statuses (sentinel, not a status)
)
// Repo contains information about a repo.
type Repo struct {
	ID                   string
	Name                 string
	Desc                 string
	LastModifier         string
	LastModificationTime int64
	HeadCommitID         string // commit id of the master branch head
	RootID               string // fs root object of the head commit
	IsCorrupted          bool   // set by GetEx when the repo cannot be loaded
	// Set when repo is virtual
	VirtualInfo *VRepoInfo
	// ID for fs and block store
	// (equals the origin repo's id for virtual repos, else the repo's own id)
	StoreID string
	// Encrypted repo info
	IsEncrypted   bool
	EncVersion    int
	Magic         string
	RandomKey     string
	Salt          string
	PwdHash       string
	PwdHashAlgo   string
	PwdHashParams string
	Version       int
}
// VRepoInfo contains virtual repo information.
type VRepoInfo struct {
	RepoID       string // id of the virtual repo itself
	OriginRepoID string // id of the repo this virtual repo is carved from
	Path         string // sub-path inside the origin repo
	BaseCommitID string // origin commit the virtual repo is based on
}
// seafileDB is the package-wide seafile database handle, set once via Init
// and used by every query in this package.
var seafileDB *sql.DB

// Init initialize status of repomgr package
// Must be called before any other repomgr function.
func Init(seafDB *sql.DB) {
	seafileDB = seafDB
}
// Get returns Repo object by repo ID.
// It joins Repo with its master-branch head and any VirtualRepo row, then
// fills in metadata from the head commit. Returns nil when the repo does not
// exist, has no head commit, or any lookup fails (errors are logged).
func Get(id string) *Repo {
	query := `SELECT r.repo_id, b.commit_id, v.origin_repo, v.path, v.base_commit FROM ` +
		`Repo r LEFT JOIN Branch b ON r.repo_id = b.repo_id ` +
		`LEFT JOIN VirtualRepo v ON r.repo_id = v.repo_id ` +
		`WHERE r.repo_id = ? AND b.name = 'master'`
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	stmt, err := seafileDB.PrepareContext(ctx, query)
	if err != nil {
		log.Errorf("failed to prepare sql : %s :%v", query, err)
		return nil
	}
	defer stmt.Close()
	rows, err := stmt.QueryContext(ctx, id)
	if err != nil {
		log.Errorf("failed to query sql : %v", err)
		return nil
	}
	defer rows.Close()
	repo := new(Repo)
	// The virtual-repo columns are NULL for normal repos.
	var originRepoID sql.NullString
	var path sql.NullString
	var baseCommitID sql.NullString
	if rows.Next() {
		err := rows.Scan(&repo.ID, &repo.HeadCommitID, &originRepoID, &path, &baseCommitID)
		if err != nil {
			log.Errorf("failed to scan sql rows : %v", err)
			return nil
		}
	} else {
		// No such repo (or no master branch).
		return nil
	}
	if repo.HeadCommitID == "" {
		log.Errorf("repo %s is corrupted", id)
		return nil
	}
	if originRepoID.Valid {
		// Virtual repo: fs/block objects live in the origin repo's store.
		repo.VirtualInfo = new(VRepoInfo)
		repo.VirtualInfo.RepoID = id
		repo.VirtualInfo.OriginRepoID = originRepoID.String
		repo.StoreID = originRepoID.String
		if path.Valid {
			repo.VirtualInfo.Path = path.String
		}
		if baseCommitID.Valid {
			repo.VirtualInfo.BaseCommitID = baseCommitID.String
		}
	} else {
		repo.StoreID = repo.ID
	}
	// Metadata (name, modifier, root, encryption) comes from the head commit.
	commit, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
	if err != nil {
		log.Errorf("failed to load commit %s/%s : %v", repo.ID, repo.HeadCommitID, err)
		return nil
	}
	repo.Name = commit.RepoName
	repo.Desc = commit.RepoDesc
	repo.LastModifier = commit.CreatorName
	repo.LastModificationTime = commit.Ctime
	repo.RootID = commit.RootID
	repo.Version = commit.Version
	if commit.Encrypted == "true" {
		repo.IsEncrypted = true
		repo.EncVersion = commit.EncVersion
		// Which key material is stored depends on the encryption version;
		// Magic is only kept when no password hash is recorded.
		if repo.EncVersion == 1 && commit.PwdHash == "" {
			repo.Magic = commit.Magic
		} else if repo.EncVersion == 2 {
			repo.RandomKey = commit.RandomKey
		} else if repo.EncVersion == 3 {
			repo.RandomKey = commit.RandomKey
			repo.Salt = commit.Salt
		} else if repo.EncVersion == 4 {
			repo.RandomKey = commit.RandomKey
			repo.Salt = commit.Salt
		}
		if repo.EncVersion >= 2 && commit.PwdHash == "" {
			repo.Magic = commit.Magic
		}
		if commit.PwdHash != "" {
			repo.PwdHash = commit.PwdHash
			repo.PwdHashAlgo = commit.PwdHashAlgo
			repo.PwdHashParams = commit.PwdHashParams
		}
	}
	return repo
}
// RepoToCommit converts Repo to Commit.
// It copies id, name, version and the encryption fields relevant to the
// repo's encryption version into commit.
func RepoToCommit(repo *Repo, commit *commitmgr.Commit) {
	commit.RepoID = repo.ID
	commit.RepoName = repo.Name
	if !repo.IsEncrypted {
		commit.Encrypted = "false"
		commit.Version = repo.Version
		return
	}
	commit.Encrypted = "true"
	commit.EncVersion = repo.EncVersion
	switch repo.EncVersion {
	case 1:
		if repo.PwdHash == "" {
			commit.Magic = repo.Magic
		}
	case 2:
		commit.RandomKey = repo.RandomKey
	case 3, 4:
		commit.RandomKey = repo.RandomKey
		commit.Salt = repo.Salt
	}
	if repo.EncVersion >= 2 && repo.PwdHash == "" {
		commit.Magic = repo.Magic
	}
	if repo.PwdHash != "" {
		commit.PwdHash = repo.PwdHash
		commit.PwdHashAlgo = repo.PwdHashAlgo
		commit.PwdHashParams = repo.PwdHashParams
	}
	commit.Version = repo.Version
}
// GetEx return repo object even if it's corrupted.
// Unlike Get, database or commit-load failures do not return nil: the repo is
// returned with IsCorrupted set. nil is only returned when the repo does not
// exist at all.
func GetEx(id string) *Repo {
	repo := new(Repo)
	query := `SELECT r.repo_id, b.commit_id, v.origin_repo, v.path, v.base_commit FROM ` +
		`Repo r LEFT JOIN Branch b ON r.repo_id = b.repo_id ` +
		`LEFT JOIN VirtualRepo v ON r.repo_id = v.repo_id ` +
		`WHERE r.repo_id = ? AND b.name = 'master'`
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	stmt, err := seafileDB.PrepareContext(ctx, query)
	if err != nil {
		repo.IsCorrupted = true
		return repo
	}
	defer stmt.Close()
	rows, err := stmt.QueryContext(ctx, id)
	if err != nil {
		repo.IsCorrupted = true
		return repo
	}
	defer rows.Close()
	// The virtual-repo columns are NULL for normal repos.
	var originRepoID sql.NullString
	var path sql.NullString
	var baseCommitID sql.NullString
	if rows.Next() {
		err := rows.Scan(&repo.ID, &repo.HeadCommitID, &originRepoID, &path, &baseCommitID)
		if err != nil {
			repo.IsCorrupted = true
			return repo
		}
	} else if rows.Err() != nil {
		// Iteration failed: report corruption rather than absence.
		repo.IsCorrupted = true
		return repo
	} else {
		// Repo genuinely does not exist.
		return nil
	}
	if originRepoID.Valid {
		// Virtual repo: fs/block objects live in the origin repo's store.
		repo.VirtualInfo = new(VRepoInfo)
		repo.VirtualInfo.RepoID = id
		repo.VirtualInfo.OriginRepoID = originRepoID.String
		repo.StoreID = originRepoID.String
		if path.Valid {
			repo.VirtualInfo.Path = path.String
		}
		if baseCommitID.Valid {
			repo.VirtualInfo.BaseCommitID = baseCommitID.String
		}
	} else {
		repo.StoreID = repo.ID
	}
	if repo.HeadCommitID == "" {
		repo.IsCorrupted = true
		return repo
	}
	commit, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
	if err != nil {
		log.Errorf("failed to load commit %s/%s : %v", repo.ID, repo.HeadCommitID, err)
		repo.IsCorrupted = true
		return repo
	}
	repo.Name = commit.RepoName
	repo.LastModifier = commit.CreatorName
	repo.LastModificationTime = commit.Ctime
	repo.RootID = commit.RootID
	repo.Version = commit.Version
	if commit.Encrypted == "true" {
		repo.IsEncrypted = true
		repo.EncVersion = commit.EncVersion
		// NOTE(review): unlike Get, Magic is copied here regardless of
		// PwdHash for versions 1-4 — confirm this difference is intentional.
		if repo.EncVersion == 1 {
			repo.Magic = commit.Magic
		} else if repo.EncVersion == 2 {
			repo.Magic = commit.Magic
			repo.RandomKey = commit.RandomKey
		} else if repo.EncVersion == 3 {
			repo.Magic = commit.Magic
			repo.RandomKey = commit.RandomKey
			repo.Salt = commit.Salt
		} else if repo.EncVersion == 4 {
			repo.Magic = commit.Magic
			repo.RandomKey = commit.RandomKey
			repo.Salt = commit.Salt
		}
		if commit.PwdHash != "" {
			repo.PwdHash = commit.PwdHash
			repo.PwdHashAlgo = commit.PwdHashAlgo
			repo.PwdHashParams = commit.PwdHashParams
		}
	}
	return repo
}
// GetVirtualRepoInfo return virtual repo info by repo id.
// Returns (nil, nil) when repoID is not a virtual repo.
func GetVirtualRepoInfo(repoID string) (*VRepoInfo, error) {
	query := "SELECT repo_id, origin_repo, path, base_commit FROM VirtualRepo WHERE repo_id = ?"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	info := new(VRepoInfo)
	err := seafileDB.QueryRowContext(ctx, query, repoID).Scan(
		&info.RepoID, &info.OriginRepoID, &info.Path, &info.BaseCommitID)
	switch {
	case err == nil:
		return info, nil
	case err == sql.ErrNoRows:
		// Not a virtual repo: neither info nor error.
		return nil, nil
	default:
		return nil, err
	}
}
// GetVirtualRepoInfoByOrigin return virtual repo info by origin repo id.
// The returned slice is nil when the origin repo has no virtual repos.
func GetVirtualRepoInfoByOrigin(originRepo string) ([]*VRepoInfo, error) {
	sqlStr := "SELECT repo_id, origin_repo, path, base_commit " +
		"FROM VirtualRepo WHERE origin_repo=?"
	var vRepos []*VRepoInfo
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	rows, err := seafileDB.QueryContext(ctx, sqlStr, originRepo)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		vRepoInfo := new(VRepoInfo)
		// A Scan failure inside rows.Next() can never be sql.ErrNoRows;
		// it is a real error, and the old code appended a half-filled
		// record after one. Fail instead.
		if err := rows.Scan(&vRepoInfo.RepoID, &vRepoInfo.OriginRepoID, &vRepoInfo.Path, &vRepoInfo.BaseCommitID); err != nil {
			return nil, err
		}
		vRepos = append(vRepos, vRepoInfo)
	}
	// Surface any iteration error that terminated the loop early.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return vRepos, nil
}
// GetEmailByToken return user's email by token.
// An unknown token yields an empty email with a nil error.
func GetEmailByToken(repoID string, token string) (string, error) {
	query := "SELECT email FROM RepoUserToken WHERE repo_id = ? AND token = ?"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	var email string
	err := seafileDB.QueryRowContext(ctx, query, repoID, token).Scan(&email)
	if err != nil && err != sql.ErrNoRows {
		return email, err
	}
	return email, nil
}
// GetRepoStatus return repo status by repo id.
// For virtual repos the origin repo's status takes precedence; otherwise the
// repo's own RepoInfo row is consulted. -1 means no status was found.
func GetRepoStatus(repoID string) (int, error) {
	status := -1
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	// First, check origin repo's status.
	originQuery := "SELECT i.status FROM VirtualRepo v LEFT JOIN RepoInfo i " +
		"ON i.repo_id=v.origin_repo WHERE v.repo_id=? " +
		"AND i.repo_id IS NOT NULL"
	err := seafileDB.QueryRowContext(ctx, originQuery, repoID).Scan(&status)
	if err != nil {
		if err != sql.ErrNoRows {
			return status, err
		}
		status = -1
	}
	if status >= 0 {
		return status, nil
	}
	// Then, check repo's own status.
	ownQuery := "SELECT status FROM RepoInfo WHERE repo_id=?"
	err = seafileDB.QueryRowContext(ctx, ownQuery, repoID).Scan(&status)
	if err != nil && err != sql.ErrNoRows {
		return status, err
	}
	return status, nil
}
// TokenPeerInfoExists check if the token exists.
func TokenPeerInfoExists(token string) (bool, error) {
	query := "SELECT token FROM RepoTokenPeerInfo WHERE token=?"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	var found string
	err := seafileDB.QueryRowContext(ctx, query, token).Scan(&found)
	switch {
	case err == nil:
		return true, nil
	case err == sql.ErrNoRows:
		return false, nil
	default:
		return false, err
	}
}
// AddTokenPeerInfo add token peer info to RepoTokenPeerInfo table.
func AddTokenPeerInfo(token, peerID, peerIP, peerName, clientVer string, syncTime int64) error {
	query := "INSERT INTO RepoTokenPeerInfo (token, peer_id, peer_ip, peer_name, sync_time, client_ver)" +
		"VALUES (?, ?, ?, ?, ?, ?)"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	_, err := seafileDB.ExecContext(ctx, query, token, peerID, peerIP, peerName, syncTime, clientVer)
	return err
}
// UpdateTokenPeerInfo update token peer info to RepoTokenPeerInfo table.
// NOTE(review): the first placeholder updates the peer_ip column but is bound
// to the peerID parameter — callers appear to pass the peer's IP address in
// this argument; confirm against call sites before renaming anything.
func UpdateTokenPeerInfo(token, peerID, clientVer string, syncTime int64) error {
	sqlStr := "UPDATE RepoTokenPeerInfo SET " +
		"peer_ip=?, sync_time=?, client_ver=? WHERE token=?"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	if _, err := seafileDB.ExecContext(ctx, sqlStr, peerID, syncTime, clientVer, token); err != nil {
		return err
	}
	return nil
}
// GetUploadTmpFile gets the tmp file path of upload file.
// The path is looked up both with and without a leading slash, since rows
// may have been stored in either form. An empty result with a nil error
// means no temp file is recorded.
func GetUploadTmpFile(repoID, filePath string) (string, error) {
	// Guard against indexing an empty path below (previously a panic).
	if filePath == "" {
		return "", fmt.Errorf("empty file path")
	}
	var filePathNoSlash string
	if filePath[0] == '/' {
		filePathNoSlash = filePath[1:]
	} else {
		filePathNoSlash = filePath
		filePath = "/" + filePath
	}
	var tmpFile string
	sqlStr := "SELECT tmp_file_path FROM WebUploadTempFiles WHERE repo_id = ? AND file_path = ?"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	row := seafileDB.QueryRowContext(ctx, sqlStr, repoID, filePath)
	if err := row.Scan(&tmpFile); err != nil {
		if err != sql.ErrNoRows {
			return "", err
		}
	}
	if tmpFile == "" {
		// Fall back to the slash-less form of the path.
		row := seafileDB.QueryRowContext(ctx, sqlStr, repoID, filePathNoSlash)
		if err := row.Scan(&tmpFile); err != nil {
			if err != sql.ErrNoRows {
				return "", err
			}
		}
	}
	return tmpFile, nil
}
// AddUploadTmpFile adds the tmp file path of upload file.
// The stored file path is normalized to start with a slash.
func AddUploadTmpFile(repoID, filePath, tmpFile string) error {
	// Guard against indexing an empty path below (previously a panic).
	if filePath == "" {
		return fmt.Errorf("empty file path")
	}
	if filePath[0] != '/' {
		filePath = "/" + filePath
	}
	sqlStr := "INSERT INTO WebUploadTempFiles (repo_id, file_path, tmp_file_path) VALUES (?, ?, ?)"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	_, err := seafileDB.ExecContext(ctx, sqlStr, repoID, filePath, tmpFile)
	if err != nil {
		return err
	}
	return nil
}
// DelUploadTmpFile deletes the tmp file path of upload file.
// Both the slashed and slash-less forms of the path are removed.
func DelUploadTmpFile(repoID, filePath string) error {
	// Guard against indexing an empty path below (previously a panic).
	if filePath == "" {
		return fmt.Errorf("empty file path")
	}
	var filePathNoSlash string
	if filePath[0] == '/' {
		filePathNoSlash = filePath[1:]
	} else {
		filePathNoSlash = filePath
		filePath = "/" + filePath
	}
	sqlStr := "DELETE FROM WebUploadTempFiles WHERE repo_id = ? AND file_path IN (?, ?)"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	_, err := seafileDB.ExecContext(ctx, sqlStr, repoID, filePath, filePathNoSlash)
	if err != nil {
		return err
	}
	return nil
}
// setRepoCommitToDb upserts the RepoInfo row for repoID with metadata taken
// from a commit: name, update time (now when zero), version, encryption flag
// and last modifier.
func setRepoCommitToDb(repoID, repoName string, updateTime int64, version int, isEncrypted string, lastModifier string) error {
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	// Probe for an existing row to decide between UPDATE and INSERT.
	var exists int
	probe := "SELECT 1 FROM RepoInfo WHERE repo_id=?"
	if err := seafileDB.QueryRowContext(ctx, probe, repoID).Scan(&exists); err != nil && err != sql.ErrNoRows {
		return err
	}
	if updateTime == 0 {
		updateTime = time.Now().Unix()
	}
	var encrypted int
	if isEncrypted == "true" {
		encrypted = 1
	}
	if exists == 1 {
		stmt := "UPDATE RepoInfo SET name=?, update_time=?, version=?, is_encrypted=?, " +
			"last_modifier=? WHERE repo_id=?"
		if _, err := seafileDB.ExecContext(ctx, stmt, repoName, updateTime, version, encrypted, lastModifier, repoID); err != nil {
			return err
		}
		return nil
	}
	stmt := "INSERT INTO RepoInfo (repo_id, name, update_time, version, is_encrypted, last_modifier) " +
		"VALUES (?, ?, ?, ?, ?, ?)"
	if _, err := seafileDB.ExecContext(ctx, stmt, repoID, repoName, updateTime, version, encrypted, lastModifier); err != nil {
		return err
	}
	return nil
}
// SetVirtualRepoBaseCommitPath updates the table of VirtualRepo.
// It records the new base commit and sub-path for the virtual repo.
func SetVirtualRepoBaseCommitPath(repoID, baseCommitID, newPath string) error {
	query := "UPDATE VirtualRepo SET base_commit=?, path=? WHERE repo_id=?"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	_, err := seafileDB.ExecContext(ctx, query, baseCommitID, newPath, repoID)
	return err
}
// GetVirtualRepoIDsByOrigin return the virtual repo ids by origin repo id.
// The returned slice is nil when the origin repo has no virtual repos.
func GetVirtualRepoIDsByOrigin(repoID string) ([]string, error) {
	sqlStr := "SELECT repo_id FROM VirtualRepo WHERE origin_repo=?"
	var ids []string
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	rows, err := seafileDB.QueryContext(ctx, sqlStr, repoID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		var id string
		// Scan inside rows.Next() never returns sql.ErrNoRows; any error
		// is real. The old code appended a stale/empty id after a failed
		// scan — fail instead.
		if err := rows.Scan(&id); err != nil {
			return nil, err
		}
		ids = append(ids, id)
	}
	// Surface any iteration error that terminated the loop early.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return ids, nil
}
// DelVirtualRepo deletes virtual repo from database.
// On-disk/DB bookkeeping is cleaned up first, then the VirtualRepo row
// itself is removed.
func DelVirtualRepo(repoID string, cloudMode bool) error {
	if err := removeVirtualRepoOndisk(repoID, cloudMode); err != nil {
		return fmt.Errorf("failed to remove virtual repo on disk: %v", err)
	}
	query := "DELETE FROM VirtualRepo WHERE repo_id = ?"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	_, err := seafileDB.ExecContext(ctx, query, repoID)
	return err
}
// removeVirtualRepoOndisk removes all database bookkeeping for a virtual
// repo: the Repo row, its branches and branch heads, ownership, shares,
// group shares, tokens, valid-since and size records, finishing by marking
// the repo for garbage collection in GarbageRepos. InnerPubRepo is only
// touched when not in cloud mode.
func removeVirtualRepoOndisk(repoID string, cloudMode bool) error {
	sqlStr := "DELETE FROM Repo WHERE repo_id = ?"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	_, err := seafileDB.ExecContext(ctx, sqlStr, repoID)
	if err != nil {
		return err
	}
	// Delete every branch of the repo together with its RepoHead row.
	sqlStr = "SELECT name, repo_id, commit_id FROM Branch WHERE repo_id=?"
	rows, err := seafileDB.QueryContext(ctx, sqlStr, repoID)
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var name, id, commitID string
		if err := rows.Scan(&name, &id, &commitID); err != nil {
			if err != sql.ErrNoRows {
				return err
			}
		}
		sqlStr := "DELETE FROM RepoHead WHERE branch_name = ? AND repo_id = ?"
		_, err := seafileDB.ExecContext(ctx, sqlStr, name, id)
		if err != nil {
			return err
		}
		sqlStr = "DELETE FROM Branch WHERE name=? AND repo_id=?"
		_, err = seafileDB.ExecContext(ctx, sqlStr, name, id)
		if err != nil {
			return err
		}
	}
	sqlStr = "DELETE FROM RepoOwner WHERE repo_id = ?"
	_, err = seafileDB.ExecContext(ctx, sqlStr, repoID)
	if err != nil {
		return err
	}
	sqlStr = "DELETE FROM SharedRepo WHERE repo_id = ?"
	_, err = seafileDB.ExecContext(ctx, sqlStr, repoID)
	if err != nil {
		return err
	}
	sqlStr = "DELETE FROM RepoGroup WHERE repo_id = ?"
	_, err = seafileDB.ExecContext(ctx, sqlStr, repoID)
	if err != nil {
		return err
	}
	// Inner public shares only exist outside cloud mode.
	if !cloudMode {
		sqlStr = "DELETE FROM InnerPubRepo WHERE repo_id = ?"
		_, err := seafileDB.ExecContext(ctx, sqlStr, repoID)
		if err != nil {
			return err
		}
	}
	sqlStr = "DELETE FROM RepoUserToken WHERE repo_id = ?"
	_, err = seafileDB.ExecContext(ctx, sqlStr, repoID)
	if err != nil {
		return err
	}
	sqlStr = "DELETE FROM RepoValidSince WHERE repo_id = ?"
	_, err = seafileDB.ExecContext(ctx, sqlStr, repoID)
	if err != nil {
		return err
	}
	sqlStr = "DELETE FROM RepoSize WHERE repo_id = ?"
	_, err = seafileDB.ExecContext(ctx, sqlStr, repoID)
	if err != nil {
		return err
	}
	// Queue the repo for garbage collection: INSERT when absent, REPLACE
	// when a row already exists.
	var exists int
	sqlStr = "SELECT 1 FROM GarbageRepos WHERE repo_id=?"
	row := seafileDB.QueryRowContext(ctx, sqlStr, repoID)
	if err := row.Scan(&exists); err != nil {
		if err != sql.ErrNoRows {
			return err
		}
	}
	if exists == 0 {
		sqlStr = "INSERT INTO GarbageRepos (repo_id) VALUES (?)"
		_, err := seafileDB.ExecContext(ctx, sqlStr, repoID)
		if err != nil {
			return err
		}
	} else {
		sqlStr = "REPLACE INTO GarbageRepos (repo_id) VALUES (?)"
		_, err := seafileDB.ExecContext(ctx, sqlStr, repoID)
		if err != nil {
			return err
		}
	}
	return nil
}
// IsVirtualRepo check if the repo is a virtual repo.
func IsVirtualRepo(repoID string) (bool, error) {
	query := "SELECT 1 FROM VirtualRepo WHERE repo_id = ?"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	var one int
	err := seafileDB.QueryRowContext(ctx, query, repoID).Scan(&one)
	switch {
	case err == nil:
		return true, nil
	case err == sql.ErrNoRows:
		return false, nil
	default:
		return false, err
	}
}
// GetRepoOwner get the owner of repo.
// An unknown repo yields an empty owner with a nil error.
func GetRepoOwner(repoID string) (string, error) {
	query := "SELECT owner_id FROM RepoOwner WHERE repo_id=?"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	var owner string
	err := seafileDB.QueryRowContext(ctx, query, repoID).Scan(&owner)
	if err != nil && err != sql.ErrNoRows {
		return "", err
	}
	return owner, nil
}
// UpdateRepoInfo refreshes the RepoInfo row for repoID from the given
// commit's metadata (name, ctime, version, encryption flag, creator).
func UpdateRepoInfo(repoID, commitID string) error {
	head, err := commitmgr.Load(repoID, commitID)
	if err != nil {
		err := fmt.Errorf("failed to get commit %s:%s", repoID, commitID)
		return err
	}
	// Propagate the DB error instead of silently discarding it (the old
	// code ignored setRepoCommitToDb's return value).
	if err := setRepoCommitToDb(repoID, head.RepoName, head.Ctime, head.Version, head.Encrypted, head.CreatorName); err != nil {
		return err
	}
	return nil
}
// HasLastGCID reports whether a LastGCID row exists for the given repo and
// client pair.
func HasLastGCID(repoID, clientID string) (bool, error) {
	query := "SELECT 1 FROM LastGCID WHERE repo_id = ? AND client_id = ?"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	var one int
	err := seafileDB.QueryRowContext(ctx, query, repoID, clientID).Scan(&one)
	if err != nil && err != sql.ErrNoRows {
		return false, err
	}
	return one != 0, nil
}
// GetLastGCID returns the GC id last seen by clientID for repoID, or an
// empty string when none is recorded (NULL and missing rows both yield "").
func GetLastGCID(repoID, clientID string) (string, error) {
	query := "SELECT gc_id FROM LastGCID WHERE repo_id = ? AND client_id = ?"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	var gcID sql.NullString
	err := seafileDB.QueryRowContext(ctx, query, repoID, clientID).Scan(&gcID)
	if err != nil && err != sql.ErrNoRows {
		return "", err
	}
	return gcID.String, nil
}
// GetCurrentGCID returns the repo's current GC id from the GCID table, or an
// empty string when none is recorded (NULL and missing rows both yield "").
func GetCurrentGCID(repoID string) (string, error) {
	query := "SELECT gc_id FROM GCID WHERE repo_id = ?"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	var gcID sql.NullString
	err := seafileDB.QueryRowContext(ctx, query, repoID).Scan(&gcID)
	if err != nil && err != sql.ErrNoRows {
		return "", err
	}
	return gcID.String, nil
}
// RemoveLastGCID deletes the LastGCID record for the repo/client pair.
func RemoveLastGCID(repoID, clientID string) error {
	query := "DELETE FROM LastGCID WHERE repo_id = ? AND client_id = ?"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	_, err := seafileDB.ExecContext(ctx, query, repoID, clientID)
	return err
}
// SetLastGCID records gcID as the GC id last seen by clientID for repoID,
// updating the existing row when one exists and inserting otherwise.
func SetLastGCID(repoID, clientID, gcID string) error {
	exist, err := HasLastGCID(repoID, clientID)
	if err != nil {
		return err
	}
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	var query string
	var args []interface{}
	if exist {
		query = "UPDATE LastGCID SET gc_id = ? WHERE repo_id = ? AND client_id = ?"
		args = []interface{}{gcID, repoID, clientID}
	} else {
		query = "INSERT INTO LastGCID (repo_id, client_id, gc_id) VALUES (?, ?, ?)"
		args = []interface{}{repoID, clientID, gcID}
	}
	if _, err := seafileDB.ExecContext(ctx, query, args...); err != nil {
		return err
	}
	return nil
}
================================================
FILE: fileserver/repomgr/repomgr_test.go
================================================
package repomgr
import (
"database/sql"
"fmt"
"os"
"testing"
_ "github.com/go-sql-driver/mysql"
"github.com/haiwen/seafile-server/fileserver/commitmgr"
"github.com/haiwen/seafile-server/fileserver/searpc"
)
// Test fixtures: database credentials, seafile config locations and the
// RPC endpoint used to create/destroy the repo under test.
const (
	// repoID = "9646f13e-bbab-4eaf-9a84-fb6e1cd776b3"
	user            = "seafile"
	password        = "seafile"
	host            = "127.0.0.1"
	port            = 3306
	dbName          = "seafile-db"
	useTLS          = false
	seafileConfPath = "/root/conf"
	seafileDataDir  = "/root/conf/seafile-data"
	repoName        = "repo"
	userName        = "seafile@seafile.com"
	encVersion      = 2
	pipePath        = "/root/runtime/seafile.sock"
	service         = "seafserv-threaded-rpcserver"
)

// repoID holds the id of the repo created by TestMain for the tests.
var repoID string

// client is the searpc client shared by all tests in this package.
var client *searpc.Client
// createRepo creates a test repo over RPC and returns its id.
// All failure paths now exit the process (previously the error and
// type-assertion paths only printed a message and fell through, returning a
// useless value), matching the existing behavior of the nil-id path.
func createRepo() string {
	id, err := client.Call("seafile_create_repo", repoName, "", userName, nil, encVersion)
	if err != nil {
		fmt.Printf("failed to create repo.\n")
		os.Exit(1)
	}
	if id == nil {
		fmt.Printf("repo id is nil.\n")
		os.Exit(1)
	}
	repoid, ok := id.(string)
	if !ok {
		fmt.Printf("returned value isn't repo id.\n")
		os.Exit(1)
	}
	return repoid
}
// delRepo destroys the test repo created in TestMain, aborting the process
// on failure.
func delRepo() {
	if _, err := client.Call("seafile_destroy_repo", repoID); err != nil {
		fmt.Printf("failed to del repo.\n")
		os.Exit(1)
	}
}
// TestMain sets up the RPC client, the test repo, the database connection
// and the commit manager before running the tests, then tears the repo down.
func TestMain(m *testing.M) {
	client = searpc.Init(pipePath, service, 10)
	repoID = createRepo()
	dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?tls=%t", user, password, host, port, dbName, useTLS)
	seafDB, err := sql.Open("mysql", dsn)
	if err != nil {
		fmt.Printf("Failed to open database: %v", err)
		// Previously execution continued with an unusable handle; abort
		// instead so later tests don't crash on a nil/broken DB.
		os.Exit(1)
	}
	Init(seafDB)
	commitmgr.Init(seafileConfPath, seafileDataDir)
	code := m.Run()
	delRepo()
	os.Exit(code)
}
// TestGet verifies that Get loads the repo created in TestMain and that the
// returned object carries the expected id.
func TestGet(t *testing.T) {
	repo := Get(repoID)
	if repo == nil {
		t.Errorf("failed to get repo : %s.\n", repoID)
		t.FailNow()
	}
	if repo.ID != repoID {
		t.Errorf("failed to get repo : %s.\n", repoID)
	}
}
================================================
FILE: fileserver/searpc/searpc.go
================================================
// Package searpc implements searpc client protocol with unix pipe transport.
package searpc
import (
"bufio"
"encoding/binary"
"encoding/json"
"fmt"
"io"
"net"
)
// Client represents a connections to the RPC server.
type Client struct {
	// path of the named pipe
	pipePath string
	// RPC service name
	Service string
	// pool holds idle connections for reuse, bounded by maxConn.
	pool    chan *net.UnixConn
	maxConn int
}
// request is the JSON envelope sent to the RPC server: the target service
// name plus the JSON-encoded call (function name and parameters).
type request struct {
	Service string `json:"service"`
	Request string `json:"request"`
}
// Init initializes rpc client.
// maxConn bounds the number of idle connections kept in the pool.
func Init(pipePath string, service string, maxConn int) *Client {
	return &Client{
		pipePath: pipePath,
		Service:  service,
		maxConn:  maxConn,
		pool:     make(chan *net.UnixConn, maxConn),
	}
}
// Call calls the RPC function funcname with variadic parameters.
// The return value of the RPC function is return as interface{} type
// The true returned type can be int32, int64, string, struct (object), list of struct (objects) or JSON
//
// Wire format: a 4-byte little-endian length prefix followed by a JSON
// request envelope; the response uses the same framing. The connection is
// returned to the pool on success and closed on any error.
func (c *Client) Call(funcname string, params ...interface{}) (interface{}, error) {
	// TODO: use reflection to compose requests and parse results.
	conn, err := c.getConn()
	if err != nil {
		return nil, err
	}
	hasErr := false
	defer func() {
		if hasErr {
			conn.Close()
		} else {
			c.returnConn(conn)
		}
	}()
	// The inner request is [funcname, params...] encoded as JSON.
	var req []interface{}
	req = append(req, funcname)
	req = append(req, params...)
	jsonstr, err := json.Marshal(req)
	if err != nil {
		hasErr = true
		err := fmt.Errorf("failed to encode rpc call to json : %v", err)
		return nil, err
	}
	reqHeader := new(request)
	reqHeader.Service = c.Service
	reqHeader.Request = string(jsonstr)
	jsonstr, err = json.Marshal(reqHeader)
	if err != nil {
		hasErr = true
		err := fmt.Errorf("failed to convert object to json : %v", err)
		return nil, err
	}
	header := make([]byte, 4)
	binary.LittleEndian.PutUint32(header, uint32(len(jsonstr)))
	// header and jsonstr are already []byte; the redundant []byte(...)
	// conversions have been removed.
	_, err = conn.Write(header)
	if err != nil {
		hasErr = true
		err := fmt.Errorf("Failed to write rpc request header : %v", err)
		return nil, err
	}
	_, err = conn.Write(jsonstr)
	if err != nil {
		hasErr = true
		err := fmt.Errorf("Failed to write rpc request body : %v", err)
		return nil, err
	}
	// Read the length-prefixed response.
	reader := bufio.NewReader(conn)
	buflen := make([]byte, 4)
	_, err = io.ReadFull(reader, buflen)
	if err != nil {
		hasErr = true
		err := fmt.Errorf("failed to read response header from rpc server : %v", err)
		return nil, err
	}
	retlen := binary.LittleEndian.Uint32(buflen)
	msg := make([]byte, retlen)
	_, err = io.ReadFull(reader, msg)
	if err != nil {
		hasErr = true
		err := fmt.Errorf("failed to read response body from rpc server : %v", err)
		return nil, err
	}
	retlist := make(map[string]interface{})
	err = json.Unmarshal(msg, &retlist)
	if err != nil {
		hasErr = true
		err := fmt.Errorf("failed to decode rpc response : %v", err)
		return nil, err
	}
	// Server-side errors arrive as err_code/err_msg; results as "ret".
	if _, ok := retlist["err_code"]; ok {
		hasErr = true
		err := fmt.Errorf("searpc server returned error : %v", retlist["err_msg"])
		return nil, err
	}
	if _, ok := retlist["ret"]; ok {
		ret := retlist["ret"]
		return ret, nil
	}
	hasErr = true
	err = fmt.Errorf("No value returned")
	return nil, err
}
// getConn returns an idle pooled connection when available, otherwise dials
// a fresh unix-socket connection to the RPC server.
func (c *Client) getConn() (*net.UnixConn, error) {
	select {
	case conn := <-c.pool:
		return conn, nil
	default:
		unixAddr, err := net.ResolveUnixAddr("unix", c.pipePath)
		if err != nil {
			err := fmt.Errorf("failed to resolve unix addr when calling rpc : %w", err)
			return nil, err
		}
		conn, err := net.DialUnix("unix", nil, unixAddr)
		if err != nil {
			// Use %w here too (was %v) so callers can unwrap the cause,
			// consistent with the resolve error above.
			err := fmt.Errorf("failed to dial unix when calling rpc : %w", err)
			return nil, err
		}
		return conn, nil
	}
}
// returnConn puts conn back into the idle pool, closing it instead when the
// pool is already full.
func (c *Client) returnConn(conn *net.UnixConn) {
	select {
	case c.pool <- conn:
	default:
		conn.Close()
	}
}
================================================
FILE: fileserver/searpc/searpc_test.go
================================================
package searpc
import (
"os"
"testing"
)
// Test fixtures: repo parameters and the RPC endpoint under test.
const (
	repoName   = "repo"
	userName   = "seafile@seafile.com"
	encVersion = 2
	pipePath   = "/root/runtime/seafile.sock"
	service    = "seafserv-threaded-rpcserver"
)

// client is the searpc client shared by all tests in this package.
var client *Client
// TestMain builds the shared RPC client and runs the test suite.
func TestMain(m *testing.M) {
	client = Init(pipePath, service, 10)
	os.Exit(m.Run())
}
// TestCallRpc exercises the RPC round-trip end to end: create a repo, fetch
// it by id, confirm it appears in the repo list, then destroy it.
func TestCallRpc(t *testing.T) {
	repoID, err := client.Call("seafile_create_repo", repoName, "", userName, nil, encVersion)
	if err != nil {
		t.Errorf("failed to create repo.\n")
	}
	if repoID == nil {
		t.Errorf("repo id is nil.\n")
		t.FailNow()
	}
	repo, err := client.Call("seafile_get_repo", repoID)
	if err != nil {
		t.Errorf("failed to get repo.\n")
	}
	if repo == nil {
		t.Errorf("repo is nil.\n")
		t.FailNow()
	}
	// The repo object is decoded as a generic JSON map.
	repoMap, ok := repo.(map[string]interface{})
	if !ok {
		t.Errorf("failed to assert the type.\n")
		t.FailNow()
	}
	if repoMap["id"] != repoID {
		t.Errorf("wrong repo id.\n")
	}
	repoList, err := client.Call("seafile_get_repo_list", -1, -1, "")
	if err != nil {
		t.Errorf("failed to get repo list.\n")
	}
	if repoList == nil {
		t.Errorf("repo list is nil.\n")
		t.FailNow()
	}
	// Scan the list for the repo we just created.
	var exists bool
	repos, ok := repoList.([]interface{})
	if !ok {
		t.Errorf("failed to assert the type.\n")
		t.FailNow()
	}
	for _, v := range repos {
		repo, ok := v.(map[string]interface{})
		if !ok {
			t.Errorf("failed to assert the type.\n")
			t.FailNow()
		}
		if repo["id"] == repoID {
			exists = true
			break
		}
	}
	if exists != true {
		t.Errorf("can't find repo %s in repo list.\n", repoID)
	}
	// Best-effort cleanup; the result is intentionally ignored.
	client.Call("seafile_destroy_repo", repoID)
}
================================================
FILE: fileserver/share/group/group.go
================================================
// Package group manages group membership and group shares.
package group
================================================
FILE: fileserver/share/public/public.go
================================================
// Package public manager inner public shares.
package public
================================================
FILE: fileserver/share/share.go
================================================
// Package share manages share relations.
// share: manages personal shares and provide high level permission check functions.
package share
import (
"context"
"database/sql"
"fmt"
"path/filepath"
"strconv"
"strings"
"github.com/haiwen/seafile-server/fileserver/option"
"github.com/haiwen/seafile-server/fileserver/repomgr"
log "github.com/sirupsen/logrus"
)
// group mirrors one row of the configured group table joined with GroupUser.
type group struct {
	id            int
	groupName     string
	creatorName   string
	timestamp     int64
	parentGroupID int // 0 when the group has no parent
}
// ccnetDB is the ccnet (user/group) database handle.
var ccnetDB *sql.DB

// seafileDB is the seafile (repo) database handle.
var seafileDB *sql.DB

// groupTableName is the configured name of the group table in ccnetDB.
var groupTableName string

// cloudMode records whether the server runs in cloud mode.
var cloudMode bool

// Init ccnetDB, seafileDB, groupTableName, cloudMode
// Must be called before any other function in this package.
func Init(cnDB *sql.DB, seafDB *sql.DB, grpTableName string, clMode bool) {
	ccnetDB = cnDB
	seafileDB = seafDB
	groupTableName = grpTableName
	cloudMode = clMode
}
// CheckPerm get user's repo permission
// Virtual repos are resolved through their origin repo; normal repos go
// straight to the share-permission check.
func CheckPerm(repoID string, user string) string {
	vInfo, err := repomgr.GetVirtualRepoInfo(repoID)
	if err != nil {
		log.Errorf("Failed to get virtual repo info by repo id %s: %v", repoID, err)
	}
	if vInfo == nil {
		return checkRepoSharePerm(repoID, user)
	}
	return checkVirtualRepoPerm(repoID, vInfo.OriginRepoID, user, vInfo.Path)
}
// checkVirtualRepoPerm resolves the permission user has on a virtual repo:
// owners of the origin repo get "rw"; otherwise folder shares covering the
// virtual path are consulted, and finally the share permissions of the
// origin repo itself.
func checkVirtualRepoPerm(repoID, originRepoID, user, vPath string) string {
	owner, err := repomgr.GetRepoOwner(originRepoID)
	if err != nil {
		log.Errorf("Failed to get repo owner: %v", err)
	}
	// The origin repo's owner always has full access.
	if owner == user && owner != "" {
		return "rw"
	}
	if perm := checkPermOnParentRepo(originRepoID, user, vPath); perm != "" {
		return perm
	}
	return checkRepoSharePerm(originRepoID, user)
}
// getUserGroups runs sqlStr — which must select group_id, group_name,
// creator_name, timestamp and parent_group_id, in that order — against the
// ccnet database and returns the resulting groups. Rows that fail to scan
// are skipped silently.
func getUserGroups(sqlStr string, args ...interface{}) ([]group, error) {
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()

	rows, err := ccnetDB.QueryContext(ctx, sqlStr, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var result []group
	for rows.Next() {
		var g group
		scanErr := rows.Scan(&g.id, &g.groupName, &g.creatorName,
			&g.timestamp, &g.parentGroupID)
		if scanErr == nil {
			result = append(result, g)
		}
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return result, nil
}
// getGroupsByUser returns the groups userName is a member of. When
// returnAncestors is true, membership in a nested group (parent_group_id
// != 0) is expanded via the GroupStructure table so every ancestor group
// along the nesting path is included in the result too.
func getGroupsByUser(userName string, returnAncestors bool) ([]group, error) {
	sqlStr := fmt.Sprintf("SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM "+
		"`%s` g, GroupUser u WHERE g.group_id = u.group_id AND user_name=? ORDER BY g.group_id DESC",
		groupTableName)
	groups, err := getUserGroups(sqlStr, userName)
	if err != nil {
		err := fmt.Errorf("Failed to get groups by user %s: %v", userName, err)
		return nil, err
	}
	if !returnAncestors {
		return groups, nil
	}
	// Top-level groups go straight to the result; ids of nested groups are
	// collected into an "IN (id1, id2, ...)" clause for a path lookup.
	// The ids come from the database, not user input, so interpolating
	// them into the SQL string is safe.
	sqlStr = ""
	var ret []group
	for _, group := range groups {
		parentGroupID := group.parentGroupID
		groupID := group.id
		if parentGroupID != 0 {
			if sqlStr == "" {
				sqlStr = fmt.Sprintf("SELECT path FROM GroupStructure WHERE group_id IN (%d",
					groupID)
			} else {
				sqlStr += fmt.Sprintf(", %d", groupID)
			}
		} else {
			ret = append(ret, group)
		}
	}
	if sqlStr != "" {
		sqlStr += ")"
		// paths is a comma separated list of group ids covering the
		// ancestor chains of all nested groups found above.
		paths, err := getGroupPaths(sqlStr)
		if err != nil {
			log.Errorf("Failed to get group paths: %v", err)
		}
		if paths == "" {
			err := fmt.Errorf("Failed to get groups path for user %s", userName)
			return nil, err
		}
		// Fetch full rows for every group id that appears on a path.
		sqlStr = fmt.Sprintf("SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM "+
			"`%s` g WHERE g.group_id IN (%s) ORDER BY g.group_id DESC",
			groupTableName, paths)
		groups, err := getUserGroups(sqlStr)
		if err != nil {
			return nil, err
		}
		ret = append(ret, groups...)
	}
	return ret, nil
}
// getGroupPaths runs sqlStr (a query selecting the `path` column from
// GroupStructure) against the ccnet database and returns the selected
// paths joined by ", ". Each path is itself a comma separated list of
// group ids, so the result can be interpolated into an SQL IN (...)
// clause by the caller.
func getGroupPaths(sqlStr string) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()

	rows, err := ccnetDB.QueryContext(ctx, sqlStr)
	if err != nil {
		return "", err
	}
	defer rows.Close()

	var sb strings.Builder
	var path string
	for rows.Next() {
		// A failed Scan used to be ignored, which re-appended the
		// previous row's value; propagate the error instead.
		if err := rows.Scan(&path); err != nil {
			return "", err
		}
		if sb.Len() > 0 {
			sb.WriteString(", ")
		}
		sb.WriteString(path)
	}
	if err := rows.Err(); err != nil {
		return "", err
	}
	return sb.String(), nil
}
// checkGroupPermByUser returns the strongest permission granted on repoID
// through any of userName's groups: "rw" wins over "r"; "" (with nil
// error) means the repo is not shared to any of the user's groups.
func checkGroupPermByUser(repoID string, userName string) (string, error) {
	groups, err := getGroupsByUser(userName, false)
	if err != nil {
		return "", err
	}
	if len(groups) == 0 {
		return "", nil
	}
	// Build "... group_id IN (id1,id2,...)". The ids come from the
	// database, not from user input, so interpolation is safe here.
	var sqlBuilder strings.Builder
	sqlBuilder.WriteString("SELECT permission FROM RepoGroup WHERE repo_id = ? AND group_id IN (")
	for i := 0; i < len(groups); i++ {
		sqlBuilder.WriteString(strconv.Itoa(groups[i].id))
		if i+1 < len(groups) {
			sqlBuilder.WriteString(",")
		}
	}
	sqlBuilder.WriteString(")")
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	rows, err := seafileDB.QueryContext(ctx, sqlBuilder.String(), repoID)
	if err != nil {
		err := fmt.Errorf("Failed to get group permission by user %s: %v", userName, err)
		return "", err
	}
	defer rows.Close()
	var perm string
	var origPerm string
	// Keep "rw" if any group grants it; otherwise remember the first "r".
	for rows.Next() {
		if err := rows.Scan(&perm); err == nil {
			if perm == "rw" {
				origPerm = perm
			} else if perm == "r" && origPerm == "" {
				origPerm = perm
			}
		}
	}
	if err := rows.Err(); err != nil {
		err := fmt.Errorf("Failed to get group permission for user %s: %v", userName, err)
		return "", err
	}
	return origPerm, nil
}
// checkSharedRepoPerm looks up the permission of a direct user-to-user
// share of repoID to email. A missing share is not an error: it returns
// ("", nil).
func checkSharedRepoPerm(repoID string, email string) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()

	var perm string
	query := "SELECT permission FROM SharedRepo WHERE repo_id=? AND to_email=?"
	err := seafileDB.QueryRowContext(ctx, query, repoID, email).Scan(&perm)
	if err != nil && err != sql.ErrNoRows {
		return "", fmt.Errorf("Failed to check shared repo permission: %v", err)
	}
	return perm, nil
}
// checkInnerPubRepoPerm looks up the permission of an organization-wide
// (inner public) share of repoID. A repo that is not inner-public is not
// an error: it returns ("", nil).
func checkInnerPubRepoPerm(repoID string) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()

	var perm string
	query := "SELECT permission FROM InnerPubRepo WHERE repo_id=?"
	err := seafileDB.QueryRowContext(ctx, query, repoID).Scan(&perm)
	if err != nil && err != sql.ErrNoRows {
		return "", fmt.Errorf("Failed to check inner public repo permission: %v", err)
	}
	return perm, nil
}
// checkRepoSharePerm returns the permission userName has on repoID,
// checking in order: repo ownership, direct user shares, group shares,
// and — unless running in cloud mode — inner public shares. An empty
// string means no access. Lookup errors are logged and treated as
// "no permission from that source".
func checkRepoSharePerm(repoID string, userName string) string {
	owner, err := repomgr.GetRepoOwner(repoID)
	if err != nil {
		log.Errorf("Failed to get repo owner: %v", err)
	}
	// The owner always has read-write access.
	if owner != "" && owner == userName {
		perm := "rw"
		return perm
	}
	perm, err := checkSharedRepoPerm(repoID, userName)
	if err != nil {
		log.Errorf("Failed to get shared repo permission: %v", err)
	}
	if perm != "" {
		return perm
	}
	perm, err = checkGroupPermByUser(repoID, userName)
	if err != nil {
		log.Errorf("Failed to get group permission by user %s: %v", userName, err)
	}
	if perm != "" {
		return perm
	}
	if !cloudMode {
		perm, err = checkInnerPubRepoPerm(repoID)
		if err != nil {
			// Fixed typo in log message: "pulic" -> "public".
			log.Errorf("Failed to get inner public repo permission by repo id %s: %v", repoID, err)
			return ""
		}
		return perm
	}
	return ""
}
// getSharedDirsToUser returns a map from virtual-repo path to permission
// for every folder inside originRepoID that is shared directly to toEmail.
func getSharedDirsToUser(originRepoID string, toEmail string) (map[string]string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()

	query := "SELECT v.path, s.permission FROM SharedRepo s, VirtualRepo v WHERE " +
		"s.repo_id = v.repo_id AND s.to_email = ? AND v.origin_repo = ?"
	rows, err := seafileDB.QueryContext(ctx, query, toEmail, originRepoID)
	if err != nil {
		return nil, fmt.Errorf("Failed to get shared directories by user %s: %v", toEmail, err)
	}
	defer rows.Close()

	dirs := make(map[string]string)
	for rows.Next() {
		var path, perm string
		if rows.Scan(&path, &perm) == nil {
			dirs[path] = perm
		}
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("Failed to get shared directories by user %s: %v", toEmail, err)
	}
	return dirs, nil
}
// getDirPerm walks from path up through its parent directories and
// returns the permission of the deepest directory found in perms, or ""
// when neither the path nor any of its ancestors is present.
func getDirPerm(perms map[string]string, path string) string {
	// filepath.Dir returns "." for an empty path and "/" once the root
	// is reached, so those values terminate the walk.
	for cur := path; cur != "/" && cur != "." && cur != ""; cur = filepath.Dir(cur) {
		if p, ok := perms[cur]; ok {
			return p
		}
	}
	return ""
}
// convertGroupListToStr renders the ids of groups as a comma separated
// list (e.g. "1,2,3"), suitable for an SQL IN (...) clause.
func convertGroupListToStr(groups []group) string {
	ids := make([]string, 0, len(groups))
	for _, g := range groups {
		ids = append(ids, strconv.Itoa(g.id))
	}
	return strings.Join(ids, ",")
}
// getSharedDirsToGroup returns a map from virtual-repo path to permission
// for every folder inside originRepoID that is shared to any of the given
// groups. Group ids come from the database, so interpolating them into
// the query is safe.
func getSharedDirsToGroup(originRepoID string, groups []group) (map[string]string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()

	groupIDs := convertGroupListToStr(groups)
	query := fmt.Sprintf("SELECT v.path, s.permission "+
		"FROM RepoGroup s, VirtualRepo v WHERE "+
		"s.repo_id = v.repo_id AND v.origin_repo = ? "+
		"AND s.group_id in (%s)", groupIDs)
	rows, err := seafileDB.QueryContext(ctx, query, originRepoID)
	if err != nil {
		return nil, fmt.Errorf("Failed to get shared directories: %v", err)
	}
	defer rows.Close()

	dirs := make(map[string]string)
	for rows.Next() {
		var path, perm string
		if rows.Scan(&path, &perm) == nil {
			dirs[path] = perm
		}
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("Failed to get shared directories: %v", err)
	}
	return dirs, nil
}
// checkPermOnParentRepo returns the permission user has on the folder
// vPath of originRepoID through folder shares: first direct user shares,
// then shares to any of the user's groups. "" means no folder share
// covers vPath.
func checkPermOnParentRepo(originRepoID, user, vPath string) string {
	var perm string
	// Direct user folder shares take precedence over group shares.
	userPerms, err := getSharedDirsToUser(originRepoID, user)
	if err != nil {
		log.Errorf("Failed to get all shared folder perms in parent repo %.8s for user %s", originRepoID, user)
		return ""
	}
	if len(userPerms) > 0 {
		perm = getDirPerm(userPerms, vPath)
		if perm != "" {
			return perm
		}
	}
	groups, err := getGroupsByUser(user, false)
	if err != nil {
		log.Errorf("Failed to get groups by user %s: %v", user, err)
	}
	if len(groups) == 0 {
		// perm is "" here: no user share matched and no groups to check.
		return perm
	}
	groupPerms, err := getSharedDirsToGroup(originRepoID, groups)
	if err != nil {
		log.Errorf("Failed to get all shared folder perm from parent repo %.8s to all user groups", originRepoID)
		return ""
	}
	if len(groupPerms) == 0 {
		return ""
	}
	perm = getDirPerm(groupPerms, vPath)
	return perm
}
// SharedRepo is a shared repo object returned by the listing functions in
// this package; it is marshalled to JSON by sync API handlers.
type SharedRepo struct {
	Version      int    `json:"version"`
	ID           string `json:"id"`
	HeadCommitID string `json:"head_commit_id"`
	Name         string `json:"name"`
	MTime        int64  `json:"mtime"`
	Permission   string `json:"permission"`
	Type         string `json:"type"` // share kind tag, set by callers (e.g. "repo"/"srepo"/"grepo")
	Owner        string `json:"owner"`
	RepoType     string `json:"-"` // internal classification, never serialized
}
// GetReposByOwner get repos by owner: returns every non-virtual repo owned
// by email, most recently updated first. Repos without a head commit or
// without RepoInfo metadata are silently skipped.
func GetReposByOwner(email string) ([]*SharedRepo, error) {
	var repos []*SharedRepo
	// "v.repo_id IS NULL" on the VirtualRepo LEFT JOIN filters out
	// virtual repos (shared sub-folders).
	query := "SELECT o.repo_id, b.commit_id, i.name, " +
		"i.version, i.update_time, i.last_modifier, i.type FROM " +
		"RepoOwner o LEFT JOIN Branch b ON o.repo_id = b.repo_id " +
		"LEFT JOIN RepoInfo i ON o.repo_id = i.repo_id " +
		"LEFT JOIN VirtualRepo v ON o.repo_id = v.repo_id " +
		"WHERE owner_id=? AND " +
		"v.repo_id IS NULL " +
		"ORDER BY i.update_time DESC, o.repo_id"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	stmt, err := seafileDB.PrepareContext(ctx, query)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()
	rows, err := stmt.QueryContext(ctx, email)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		repo := new(SharedRepo)
		var repoName, lastModifier, repoType sql.NullString
		if err := rows.Scan(&repo.ID, &repo.HeadCommitID,
			&repoName, &repo.Version, &repo.MTime,
			&lastModifier, &repoType); err == nil {
			// Skip rows with incomplete metadata (missing head commit,
			// name or last modifier).
			if repo.HeadCommitID == "" {
				continue
			}
			if !repoName.Valid || !lastModifier.Valid {
				continue
			}
			if repoName.String == "" || lastModifier.String == "" {
				continue
			}
			repo.Name = repoName.String
			if repoType.Valid {
				repo.RepoType = repoType.String
			}
			repos = append(repos, repo)
		}
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return repos, nil
}
// ListInnerPubRepos get inner public repos: returns every repo shared to
// the whole organization, including its owner and share permission. Repos
// without a RepoInfo name are silently skipped.
func ListInnerPubRepos() ([]*SharedRepo, error) {
	query := "SELECT InnerPubRepo.repo_id, " +
		"owner_id, permission, commit_id, i.name, " +
		"i.update_time, i.version, i.type " +
		"FROM InnerPubRepo " +
		"LEFT JOIN RepoInfo i ON InnerPubRepo.repo_id = i.repo_id, RepoOwner, Branch " +
		"WHERE InnerPubRepo.repo_id=RepoOwner.repo_id AND " +
		"InnerPubRepo.repo_id = Branch.repo_id AND Branch.name = 'master'"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	stmt, err := seafileDB.PrepareContext(ctx, query)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()
	rows, err := stmt.QueryContext(ctx)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var repos []*SharedRepo
	for rows.Next() {
		repo := new(SharedRepo)
		var repoName, repoType sql.NullString
		if err := rows.Scan(&repo.ID, &repo.Owner,
			&repo.Permission, &repo.HeadCommitID, &repoName,
			&repo.MTime, &repo.Version, &repoType); err == nil {
			// Skip repos whose RepoInfo row is missing or unnamed.
			if !repoName.Valid {
				continue
			}
			if repoName.String == "" {
				continue
			}
			repo.Name = repoName.String
			if repoType.Valid {
				repo.RepoType = repoType.String
			}
			repos = append(repos, repo)
		}
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return repos, nil
}
// ListShareRepos list share repos by email: lists repos shared from
// (columnType "from_email") or to (columnType "to_email") the given email
// address. The Owner field of each result holds the address on the other
// side of the share. Any other columnType is an error.
func ListShareRepos(email, columnType string) ([]*SharedRepo, error) {
	var repos []*SharedRepo
	// The two original hand-written queries differed only in which email
	// column is matched and which one is returned as the share peer, so
	// build a single query from those two column names. Both values are
	// fixed strings chosen here — nothing user-controlled is interpolated.
	var peerColumn string
	switch columnType {
	case "from_email":
		peerColumn = "to_email"
	case "to_email":
		peerColumn = "from_email"
	default:
		err := fmt.Errorf("Wrong column type: %s", columnType)
		return nil, err
	}
	query := fmt.Sprintf("SELECT sh.repo_id, %s, "+
		"permission, commit_id, "+
		"i.name, i.update_time, i.version, i.type FROM "+
		"SharedRepo sh LEFT JOIN RepoInfo i ON sh.repo_id = i.repo_id, Branch b "+
		"WHERE %s=? AND "+
		"sh.repo_id = b.repo_id AND "+
		"b.name = 'master' "+
		"ORDER BY i.update_time DESC, sh.repo_id", peerColumn, columnType)
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	stmt, err := seafileDB.PrepareContext(ctx, query)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()
	rows, err := stmt.QueryContext(ctx, email)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		repo := new(SharedRepo)
		var repoName, repoType sql.NullString
		if err := rows.Scan(&repo.ID, &repo.Owner,
			&repo.Permission, &repo.HeadCommitID,
			&repoName, &repo.MTime, &repo.Version, &repoType); err == nil {
			// Skip repos whose RepoInfo row is missing or unnamed.
			if !repoName.Valid {
				continue
			}
			if repoName.String == "" {
				continue
			}
			repo.Name = repoName.String
			if repoType.Valid {
				repo.RepoType = repoType.String
			}
			repos = append(repos, repo)
		}
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return repos, nil
}
// GetGroupReposByUser get group repos by user: returns repos shared to any
// group the user belongs to (including ancestor groups of nested groups).
// A negative orgID queries the RepoGroup table; a non-negative orgID
// queries OrgGroupRepo instead. The same repo may appear once per group
// it is shared to; callers deduplicate (see filterGroupRepos in sync API).
func GetGroupReposByUser(user string, orgID int) ([]*SharedRepo, error) {
	groups, err := getGroupsByUser(user, true)
	if err != nil {
		return nil, err
	}
	if len(groups) == 0 {
		return nil, nil
	}
	var sqlBuilder strings.Builder
	if orgID < 0 {
		sqlBuilder.WriteString("SELECT g.repo_id, " +
			"user_name, permission, commit_id, " +
			"i.name, i.update_time, i.version, i.type " +
			"FROM RepoGroup g " +
			"LEFT JOIN RepoInfo i ON g.repo_id = i.repo_id, " +
			"Branch b WHERE g.repo_id = b.repo_id AND " +
			"b.name = 'master' AND group_id IN (")
	} else {
		sqlBuilder.WriteString("SELECT g.repo_id, " +
			"owner, permission, commit_id, " +
			"i.name, i.update_time, i.version, i.type " +
			"FROM OrgGroupRepo g " +
			"LEFT JOIN RepoInfo i ON g.repo_id = i.repo_id, " +
			"Branch b WHERE g.repo_id = b.repo_id AND " +
			"b.name = 'master' AND group_id IN (")
	}
	// Group ids come from the database, so interpolation is safe.
	for i := 0; i < len(groups); i++ {
		sqlBuilder.WriteString(strconv.Itoa(groups[i].id))
		if i+1 < len(groups) {
			sqlBuilder.WriteString(",")
		}
	}
	sqlBuilder.WriteString(" ) ORDER BY group_id")
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	rows, err := seafileDB.QueryContext(ctx, sqlBuilder.String())
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var repos []*SharedRepo
	for rows.Next() {
		gRepo := new(SharedRepo)
		var repoType sql.NullString
		if err := rows.Scan(&gRepo.ID, &gRepo.Owner,
			&gRepo.Permission, &gRepo.HeadCommitID,
			&gRepo.Name, &gRepo.MTime, &gRepo.Version, &repoType); err == nil {
			if repoType.Valid {
				gRepo.RepoType = repoType.String
			}
			repos = append(repos, gRepo)
		}
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return repos, nil
}
================================================
FILE: fileserver/size_sched.go
================================================
package main
import (
"context"
"encoding/json"
"fmt"
"path/filepath"
"time"
"gopkg.in/ini.v1"
"database/sql"
"github.com/go-redis/redis/v8"
"github.com/haiwen/seafile-server/fileserver/commitmgr"
"github.com/haiwen/seafile-server/fileserver/diff"
"github.com/haiwen/seafile-server/fileserver/fsmgr"
"github.com/haiwen/seafile-server/fileserver/option"
"github.com/haiwen/seafile-server/fileserver/repomgr"
"github.com/haiwen/seafile-server/fileserver/workerpool"
log "github.com/sirupsen/logrus"
)
const (
	// RepoSizeList is the redis list repo-size-change tasks are pushed to.
	RepoSizeList = "repo_size_task"
)

// updateSizePool runs computeRepoSize tasks on background workers.
var updateSizePool *workerpool.WorkPool

// redisClient pushes size-change notifications; set up in sizeSchedulerInit.
var redisClient *redis.Client
// sizeSchedulerInit creates the repo-size worker pool and the redis client.
// The worker count defaults to 1 and can be overridden by the
// [scheduler] size_sched_thread_num option in seafile.conf; the redis
// connection pool is sized to match.
func sizeSchedulerInit() {
	var n int = 1
	var seafileConfPath string
	if centralDir != "" {
		seafileConfPath = filepath.Join(centralDir, "seafile.conf")
	} else {
		seafileConfPath = filepath.Join(absDataDir, "seafile.conf")
	}
	config, err := ini.Load(seafileConfPath)
	if err != nil {
		log.Fatalf("Failed to load seafile.conf: %v", err)
	}
	// Missing section/key/parse errors all silently keep the default of 1.
	if section, err := config.GetSection("scheduler"); err == nil {
		if key, err := section.GetKey("size_sched_thread_num"); err == nil {
			num, err := key.Int()
			if err == nil {
				n = num
			}
		}
	}
	updateSizePool = workerpool.CreateWorkerPool(computeRepoSize, n)
	server := fmt.Sprintf("%s:%d", option.RedisHost, option.RedisPort)
	opt := &redis.Options{
		Addr:     server,
		Password: option.RedisPasswd,
	}
	opt.PoolSize = n
	redisClient = redis.NewClient(opt)
}
// computeRepoSize is the worker-pool task that recomputes a repo's size
// and file count. args[0] must be the repo id. If a previous result
// exists (RepoSize/RepoFileCount rows) and its old head commit is still
// loadable, only the diff between old and new head is applied;
// otherwise the whole fs tree is walked from the root. The result is
// persisted and a redis notification is sent.
func computeRepoSize(args ...interface{}) error {
	if len(args) < 1 {
		return nil
	}
	repoID := args[0].(string)
	var size int64
	var fileCount int64
	repo := repomgr.Get(repoID)
	if repo == nil {
		err := fmt.Errorf("failed to get repo %s", repoID)
		return err
	}
	info, err := getOldRepoInfo(repoID)
	if err != nil {
		err := fmt.Errorf("failed to get old repo info: %v", err)
		return err
	}
	// Nothing to do when the stored result already matches the head.
	if info != nil && info.HeadID == repo.HeadCommitID {
		return nil
	}
	head, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
	if err != nil {
		err := fmt.Errorf("failed to get head commit %s", repo.HeadCommitID)
		return err
	}
	var oldHead *commitmgr.Commit
	if info != nil {
		// A load failure leaves oldHead nil and forces a full recompute.
		commit, _ := commitmgr.Load(repo.ID, info.HeadID)
		oldHead = commit
	}
	if info != nil && oldHead != nil {
		// Incremental path: apply size/count deltas from the commit diff.
		var results []*diff.DiffEntry
		var changeSize int64
		var changeFileCount int64
		err := diff.DiffCommits(oldHead, head, &results, false)
		if err != nil {
			err := fmt.Errorf("failed to do diff commits: %v", err)
			return err
		}
		for _, de := range results {
			if de.Status == diff.DiffStatusDeleted {
				changeSize -= de.Size
				changeFileCount--
			} else if de.Status == diff.DiffStatusAdded {
				changeSize += de.Size
				changeFileCount++
			} else if de.Status == diff.DiffStatusModified {
				changeSize = changeSize + de.Size - de.OriginSize
			}
		}
		size = info.Size + changeSize
		fileCount = info.FileCount + changeFileCount
	} else {
		// Full path: walk the entire fs tree from the repo root.
		info, err := fsmgr.GetFileCountInfoByPath(repo.StoreID, repo.RootID, "/")
		if err != nil {
			err := fmt.Errorf("failed to get file count")
			return err
		}
		fileCount = info.FileCount
		size = info.Size
	}
	err = setRepoSizeAndFileCount(repoID, repo.HeadCommitID, size, fileCount)
	if err != nil {
		err := fmt.Errorf("failed to set repo size and file count %s: %v", repoID, err)
		return err
	}
	// Notification failure is non-fatal; the size is already persisted.
	err = notifyRepoSizeChange(repo.StoreID)
	if err != nil {
		log.Warnf("Failed to notify repo size change for repo %s: %v", repoID, err)
	}
	return nil
}
// setRepoSizeAndFileCount upserts a repo's cached size (RepoSize table)
// and file count (RepoFileCount table) in a single transaction, recording
// newHeadID as the commit the numbers were computed for.
func setRepoSizeAndFileCount(repoID, newHeadID string, size, fileCount int64) error {
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	trans, err := seafileDB.BeginTx(ctx, nil)
	if err != nil {
		err := fmt.Errorf("failed to start transaction: %v", err)
		return err
	}
	// Upsert RepoSize: an existing row is detected by a non-empty head_id.
	var headID string
	sqlStr := "SELECT head_id FROM RepoSize WHERE repo_id=?"
	row := trans.QueryRowContext(ctx, sqlStr, repoID)
	if err := row.Scan(&headID); err != nil {
		if err != sql.ErrNoRows {
			trans.Rollback()
			return err
		}
	}
	if headID == "" {
		sqlStr := "INSERT INTO RepoSize (repo_id, size, head_id) VALUES (?, ?, ?)"
		_, err = trans.ExecContext(ctx, sqlStr, repoID, size, newHeadID)
		if err != nil {
			trans.Rollback()
			return err
		}
	} else {
		sqlStr = "UPDATE RepoSize SET size = ?, head_id = ? WHERE repo_id = ?"
		_, err = trans.ExecContext(ctx, sqlStr, size, newHeadID, repoID)
		if err != nil {
			trans.Rollback()
			return err
		}
	}
	// Upsert RepoFileCount.
	var exist int
	sqlStr = "SELECT 1 FROM RepoFileCount WHERE repo_id=?"
	row = trans.QueryRowContext(ctx, sqlStr, repoID)
	if err := row.Scan(&exist); err != nil {
		if err != sql.ErrNoRows {
			trans.Rollback()
			return err
		}
	}
	if exist != 0 {
		sqlStr := "UPDATE RepoFileCount SET file_count=? WHERE repo_id=?"
		_, err = trans.ExecContext(ctx, sqlStr, fileCount, repoID)
		if err != nil {
			trans.Rollback()
			return err
		}
	} else {
		sqlStr := "INSERT INTO RepoFileCount (repo_id,file_count) VALUES (?,?)"
		_, err = trans.ExecContext(ctx, sqlStr, repoID, fileCount)
		if err != nil {
			trans.Rollback()
			return err
		}
	}
	// The Commit error was previously discarded; surface it so callers
	// don't treat a failed commit as success.
	if err := trans.Commit(); err != nil {
		return fmt.Errorf("failed to commit transaction: %v", err)
	}
	return nil
}
// RepoSizeChangeTask is the JSON payload pushed onto the RepoSizeList
// redis list when a repo's size has been recomputed.
type RepoSizeChangeTask struct {
	RepoID string `json:"repo_id"`
}
// notifyRepoSizeChange pushes a RepoSizeChangeTask for repoID onto the
// RepoSizeList redis list so consumers learn the repo size changed. It is
// a no-op when redis is not configured.
func notifyRepoSizeChange(repoID string) error {
	if !option.HasRedisOptions {
		return nil
	}
	data, err := json.Marshal(&RepoSizeChangeTask{RepoID: repoID})
	if err != nil {
		return fmt.Errorf("failed to encode repo size change task: %w", err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := redisClient.LPush(ctx, RepoSizeList, data).Err(); err != nil {
		return fmt.Errorf("failed to push message to redis list %s: %w", RepoSizeList, err)
	}
	return nil
}
// RepoInfo contains repo information cached in the RepoSize and
// RepoFileCount tables: the head commit the numbers were computed for,
// the total size in bytes, and the file count.
type RepoInfo struct {
	HeadID    string
	Size      int64
	FileCount int64
}
// getOldRepoInfo loads the previously recorded head commit, size and file
// count of a repo from the RepoSize/RepoFileCount tables. It returns
// (nil, nil) when no record exists yet.
func getOldRepoInfo(repoID string) (*RepoInfo, error) {
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()

	query := "select s.head_id,s.size,f.file_count FROM RepoSize s LEFT JOIN RepoFileCount f ON " +
		"s.repo_id=f.repo_id WHERE s.repo_id=?"
	info := new(RepoInfo)
	err := seafileDB.QueryRowContext(ctx, query, repoID).
		Scan(&info.HeadID, &info.Size, &info.FileCount)
	switch {
	case err == sql.ErrNoRows:
		return nil, nil
	case err != nil:
		return nil, err
	}
	return info, nil
}
================================================
FILE: fileserver/sync_api.go
================================================
package main
import (
"bytes"
"context"
"database/sql"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"html"
"io"
"net"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/gorilla/mux"
"github.com/haiwen/seafile-server/fileserver/blockmgr"
"github.com/haiwen/seafile-server/fileserver/commitmgr"
"github.com/haiwen/seafile-server/fileserver/diff"
"github.com/haiwen/seafile-server/fileserver/fsmgr"
"github.com/haiwen/seafile-server/fileserver/option"
"github.com/haiwen/seafile-server/fileserver/repomgr"
"github.com/haiwen/seafile-server/fileserver/share"
"github.com/haiwen/seafile-server/fileserver/utils"
"github.com/haiwen/seafile-server/fileserver/workerpool"
log "github.com/sirupsen/logrus"
)
// checkExistType selects which kind of object an existence check targets.
type checkExistType int32

const (
	checkFSExist    checkExistType = 0 // check fs objects
	checkBlockExist checkExistType = 1 // check block objects
)

const (
	seafileServerChannelEvent = "seaf_server.event"
	seafileServerChannelStats = "seaf_server.stats"
	emptySHA1                 = "0000000000000000000000000000000000000000"
	// Cache lifetimes below are in seconds.
	tokenExpireTime            = 7200
	permExpireTime             = 7200
	virtualRepoExpireTime      = 7200
	syncAPICleaningIntervalSec = 300
	maxObjectPackSize          = 1 << 20 // 1MB
	fsIdWorkers                = 10      // workers in the fs-id calculation pool
)

var (
	// In-memory caches keyed per token/repo, cleaned periodically by the
	// ticker started in syncAPIInit.
	tokenCache           sync.Map
	permCache            sync.Map
	virtualRepoInfoCache sync.Map
	// calFsIdPool runs getFsId tasks in the background.
	calFsIdPool *workerpool.WorkPool
)

// tokenInfo is a cached token validation result.
type tokenInfo struct {
	repoID     string
	email      string
	expireTime int64 // unix seconds after which the entry is stale
}

// permInfo is a cached permission check result.
type permInfo struct {
	perm       string
	expireTime int64
}

// virtualRepoInfo is a cached virtual-repo to store-id mapping.
type virtualRepoInfo struct {
	storeID    string
	expireTime int64
}

// repoEventData describes a repo operation published on the event channel.
type repoEventData struct {
	eType      string
	user       string
	ip         string
	repoID     string
	path       string
	clientName string
}

// statsEventData describes a traffic statistics event.
type statsEventData struct {
	eType  string
	user   string
	repoID string
	bytes  uint64
}
// syncAPIInit starts the periodic cache cleaner for the sync API caches
// and creates the worker pool used to compute fs id lists off the request
// goroutine.
func syncAPIInit() {
	ticker := time.NewTicker(time.Second * syncAPICleaningIntervalSec)
	go RecoverWrapper(func() {
		for range ticker.C {
			removeSyncAPIExpireCache()
		}
	})
	calFsIdPool = workerpool.CreateWorkerPool(getFsId, fsIdWorkers)
}
// calResult is the outcome of a getFsId worker task, sent back to the
// request handler over a channel: the authenticated user (possibly "")
// and a nil err on success.
type calResult struct {
	user string
	err  *appError
}
// getFsId is the worker-pool task that computes the list of fs object ids
// a syncing client needs and writes it, JSON-encoded, to the HTTP
// response. args must be (chan *calResult, http.ResponseWriter,
// *http.Request); exactly one calResult is always sent on the channel,
// and the function's own error return is always nil.
func getFsId(args ...interface{}) error {
	if len(args) < 3 {
		return nil
	}
	resChan := args[0].(chan *calResult)
	rsp := args[1].(http.ResponseWriter)
	r := args[2].(*http.Request)
	queries := r.URL.Query()
	serverHead := queries.Get("server-head")
	if !utils.IsObjectIDValid(serverHead) {
		msg := "Invalid server-head parameter."
		appErr := &appError{nil, msg, http.StatusBadRequest}
		resChan <- &calResult{"", appErr}
		return nil
	}
	clientHead := queries.Get("client-head")
	if clientHead != "" && !utils.IsObjectIDValid(clientHead) {
		msg := "Invalid client-head parameter."
		appErr := &appError{nil, msg, http.StatusBadRequest}
		resChan <- &calResult{"", appErr}
		return nil
	}
	// Any non-empty dir-only value enables directory-only listing.
	dirOnlyArg := queries.Get("dir-only")
	var dirOnly bool
	if dirOnlyArg != "" {
		dirOnly = true
	}
	vars := mux.Vars(r)
	repoID := vars["repoid"]
	user, appErr := validateToken(r, repoID, false)
	if appErr != nil {
		resChan <- &calResult{user, appErr}
		return nil
	}
	appErr = checkPermission(repoID, user, "download", false)
	if appErr != nil {
		resChan <- &calResult{user, appErr}
		return nil
	}
	repo := repomgr.Get(repoID)
	if repo == nil {
		err := fmt.Errorf("Failed to find repo %.8s", repoID)
		appErr := &appError{err, "", http.StatusInternalServerError}
		resChan <- &calResult{user, appErr}
		return nil
	}
	ret, err := calculateSendObjectList(r.Context(), repo, serverHead, clientHead, dirOnly)
	if err != nil {
		// A canceled request context is reported without the wrapped error
		// so it isn't logged as an internal failure.
		if !errors.Is(err, context.Canceled) {
			err := fmt.Errorf("Failed to get fs id list: %w", err)
			appErr := &appError{err, "", http.StatusInternalServerError}
			resChan <- &calResult{user, appErr}
			return nil
		}
		appErr := &appError{nil, "", http.StatusInternalServerError}
		resChan <- &calResult{user, appErr}
		return nil
	}
	var objList []byte
	if ret != nil {
		objList, err = json.Marshal(ret)
		if err != nil {
			appErr := &appError{err, "", http.StatusInternalServerError}
			resChan <- &calResult{user, appErr}
			return nil
		}
	} else {
		// when get obj list is nil, return []
		objList = []byte{'[', ']'}
	}
	rsp.Header().Set("Content-Length", strconv.Itoa(len(objList)))
	rsp.WriteHeader(http.StatusOK)
	rsp.Write(objList)
	resChan <- &calResult{user, nil}
	return nil
}
// permissionCheckCB handles the sync protocol's permission-check request:
// it validates the query parameters (op, client_id, client_ver,
// client_name), the repo state and the client token, checks the requested
// permission, and records/updates the client's peer info for the token.
func permissionCheckCB(rsp http.ResponseWriter, r *http.Request) *appError {
	queries := r.URL.Query()
	op := queries.Get("op")
	if op != "download" && op != "upload" {
		msg := "op is invalid"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	// client_id, when present, must be a 40-character id.
	clientID := queries.Get("client_id")
	if clientID != "" && len(clientID) != 40 {
		msg := "client_id is invalid"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	clientVer := queries.Get("client_ver")
	if clientVer != "" {
		status := validateClientVer(clientVer)
		if status != http.StatusOK {
			msg := "client_ver is invalid"
			return &appError{nil, msg, status}
		}
	}
	clientName := queries.Get("client_name")
	if clientName != "" {
		clientName = html.UnescapeString(clientName)
	}
	vars := mux.Vars(r)
	repoID := vars["repoid"]
	repo := repomgr.GetEx(repoID)
	if repo == nil {
		msg := "repo was deleted"
		return &appError{nil, msg, seafHTTPResRepoDeleted}
	}
	if repo.IsCorrupted {
		msg := "repo was corrupted"
		return &appError{nil, msg, seafHTTPResRepoCorrupted}
	}
	user, err := validateToken(r, repoID, true)
	if err != nil {
		return err
	}
	err = checkPermission(repoID, user, op, true)
	if err != nil {
		return err
	}
	ip := getClientIPAddr(r)
	if ip == "" {
		token := r.Header.Get("Seafile-Repo-Token")
		err := fmt.Errorf("%s failed to get client ip", token)
		return &appError{err, "", http.StatusInternalServerError}
	}
	// Downloads are logged as repo sync events.
	if op == "download" {
		onRepoOper("repo-download-sync", repoID, user, ip, clientName)
	}
	// Record the client's peer info for this token, or refresh it if a
	// record already exists.
	if clientID != "" && clientName != "" {
		token := r.Header.Get("Seafile-Repo-Token")
		exists, err := repomgr.TokenPeerInfoExists(token)
		if err != nil {
			err := fmt.Errorf("Failed to check whether token %s peer info exist: %v", token, err)
			return &appError{err, "", http.StatusInternalServerError}
		}
		if !exists {
			if err := repomgr.AddTokenPeerInfo(token, clientID, ip, clientName, clientVer, int64(time.Now().Unix())); err != nil {
				err := fmt.Errorf("Failed to add token peer info: %v", err)
				return &appError{err, "", http.StatusInternalServerError}
			}
		} else {
			if err := repomgr.UpdateTokenPeerInfo(token, clientID, clientVer, int64(time.Now().Unix())); err != nil {
				err := fmt.Errorf("Failed to update token peer info: %v", err)
				return &appError{err, "", http.StatusInternalServerError}
			}
		}
	}
	return nil
}
// getBlockMapCB responds with a JSON array holding the size of each block
// of the requested file, in block order. Requires a valid token with
// download permission on the repo.
func getBlockMapCB(rsp http.ResponseWriter, r *http.Request) *appError {
	vars := mux.Vars(r)
	repoID := vars["repoid"]
	fileID := vars["id"]
	user, appErr := validateToken(r, repoID, false)
	if appErr != nil {
		return appErr
	}
	appErr = checkPermission(repoID, user, "download", false)
	if appErr != nil {
		return appErr
	}
	// Virtual repos store objects under their origin repo's store id.
	storeID, err := getRepoStoreID(repoID)
	if err != nil {
		err := fmt.Errorf("Failed to get repo store id by repo id %s: %v", repoID, err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	seafile, err := fsmgr.GetSeafile(storeID, fileID)
	if err != nil {
		msg := fmt.Sprintf("Failed to get seafile object by file id %s: %v", fileID, err)
		return &appError{nil, msg, http.StatusNotFound}
	}
	var blockSizes []int64
	for _, blockID := range seafile.BlkIDs {
		blockSize, err := blockmgr.Stat(storeID, blockID)
		if err != nil {
			err := fmt.Errorf("Failed to find block %s/%s", storeID, blockID)
			return &appError{err, "", http.StatusInternalServerError}
		}
		blockSizes = append(blockSizes, blockSize)
	}
	var data []byte
	if blockSizes != nil {
		data, err = json.Marshal(blockSizes)
		if err != nil {
			err := fmt.Errorf("Failed to marshal json: %v", err)
			return &appError{err, "", http.StatusInternalServerError}
		}
	} else {
		// A file with no blocks yields an empty JSON array.
		data = []byte{'[', ']'}
	}
	rsp.Header().Set("Content-Length", strconv.Itoa(len(data)))
	rsp.WriteHeader(http.StatusOK)
	rsp.Write(data)
	return nil
}
// getAccessibleRepoListCB responds with the JSON list of all repos the
// token's user can access: owned repos (type "repo"), repos shared to the
// user ("srepo"), group repos ("grepo") and — last — inner public repos
// (also tagged "grepo", owner "Organization"). obtainedRepos ensures each
// repo id appears only once, with the earlier (higher-priority) source
// winning.
func getAccessibleRepoListCB(rsp http.ResponseWriter, r *http.Request) *appError {
	queries := r.URL.Query()
	repoID := queries.Get("repo_id")
	if repoID == "" || !utils.IsValidUUID(repoID) {
		msg := "Invalid repo id."
		return &appError{nil, msg, http.StatusBadRequest}
	}
	user, appErr := validateToken(r, repoID, false)
	if appErr != nil {
		return appErr
	}
	obtainedRepos := make(map[string]string)
	repos, err := share.GetReposByOwner(user)
	if err != nil {
		err := fmt.Errorf("Failed to get repos by owner %s: %v", user, err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	var repoObjects []*share.SharedRepo
	for _, repo := range repos {
		// Repos with a non-empty RepoType are excluded from sync listings.
		if repo.RepoType != "" {
			continue
		}
		if _, ok := obtainedRepos[repo.ID]; !ok {
			obtainedRepos[repo.ID] = repo.ID
		}
		repo.Permission = "rw"
		repo.Type = "repo"
		repo.Owner = user
		repoObjects = append(repoObjects, repo)
	}
	repos, err = share.ListShareRepos(user, "to_email")
	if err != nil {
		err := fmt.Errorf("Failed to get share repos by user %s: %v", user, err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	for _, sRepo := range repos {
		if _, ok := obtainedRepos[sRepo.ID]; ok {
			continue
		}
		if sRepo.RepoType != "" {
			continue
		}
		sRepo.Type = "srepo"
		sRepo.Owner = strings.ToLower(sRepo.Owner)
		repoObjects = append(repoObjects, sRepo)
	}
	repos, err = share.GetGroupReposByUser(user, -1)
	if err != nil {
		err := fmt.Errorf("Failed to get group repos by user %s: %v", user, err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	// filterGroupRepos deduplicates repos shared to several groups,
	// keeping the strongest permission.
	reposTable := filterGroupRepos(repos)
	for _, gRepo := range reposTable {
		if _, ok := obtainedRepos[gRepo.ID]; ok {
			continue
		}
		gRepo.Type = "grepo"
		gRepo.Owner = strings.ToLower(gRepo.Owner)
		repoObjects = append(repoObjects, gRepo)
	}
	repos, err = share.ListInnerPubRepos()
	if err != nil {
		err := fmt.Errorf("Failed to get inner public repos: %v", err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	for _, sRepo := range repos {
		if _, ok := obtainedRepos[sRepo.ID]; ok {
			continue
		}
		if sRepo.RepoType != "" {
			continue
		}
		sRepo.Type = "grepo"
		sRepo.Owner = "Organization"
		repoObjects = append(repoObjects, sRepo)
	}
	var data []byte
	if repoObjects != nil {
		data, err = json.Marshal(repoObjects)
		if err != nil {
			err := fmt.Errorf("Failed to marshal json: %v", err)
			return &appError{err, "", http.StatusInternalServerError}
		}
	} else {
		// No accessible repos yields an empty JSON array.
		data = []byte{'[', ']'}
	}
	rsp.Header().Set("Content-Length", strconv.Itoa(len(data)))
	rsp.WriteHeader(http.StatusOK)
	rsp.Write(data)
	return nil
}
// filterGroupRepos deduplicates group-shared repos by repo ID, keeping the
// entry with the widest permission ("rw" wins over "r"). Entries with a
// non-empty RepoType are dropped.
func filterGroupRepos(repos []*share.SharedRepo) map[string]*share.SharedRepo {
	byID := make(map[string]*share.SharedRepo)
	for _, cur := range repos {
		if cur.RepoType != "" {
			continue
		}
		prev, seen := byID[cur.ID]
		if !seen || (cur.Permission == "rw" && prev.Permission == "r") {
			byID[cur.ID] = cur
		}
	}
	return byID
}
// recvFSCB receives a batch of fs objects for a repo. The request body is a
// sequence of records, each consisting of a 40-byte hex object ID, a 4-byte
// big-endian payload size, then that many bytes of object data. Requires a
// valid sync token and "upload" permission.
func recvFSCB(rsp http.ResponseWriter, r *http.Request) *appError {
	vars := mux.Vars(r)
	repoID := vars["repoid"]
	user, appErr := validateToken(r, repoID, false)
	if appErr != nil {
		return appErr
	}
	appErr = checkPermission(repoID, user, "upload", false)
	if appErr != nil {
		return appErr
	}
	// Virtual repos store objects under their origin repo's store ID.
	storeID, err := getRepoStoreID(repoID)
	if err != nil {
		err := fmt.Errorf("Failed to get repo store id by repo id %s: %v", repoID, err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	fsBuf, err := io.ReadAll(r.Body)
	if err != nil {
		return &appError{nil, err.Error(), http.StatusBadRequest}
	}
	// Each record header is 44 bytes: 40-byte object ID + 4-byte size.
	for len(fsBuf) > 44 {
		objID := string(fsBuf[:40])
		if !utils.IsObjectIDValid(objID) {
			msg := fmt.Sprintf("Fs obj id %s is invalid", objID)
			return &appError{nil, msg, http.StatusBadRequest}
		}
		var objSize uint32
		sizeBuffer := bytes.NewBuffer(fsBuf[40:44])
		if err := binary.Read(sizeBuffer, binary.BigEndian, &objSize); err != nil {
			msg := fmt.Sprintf("Failed to read fs obj size: %v", err)
			return &appError{nil, msg, http.StatusBadRequest}
		}
		// The remaining buffer must contain the whole declared payload.
		if len(fsBuf) < int(44+objSize) {
			msg := "Request body size invalid"
			return &appError{nil, msg, http.StatusBadRequest}
		}
		objBuffer := bytes.NewBuffer(fsBuf[44 : 44+objSize])
		if err := fsmgr.WriteRaw(storeID, objID, objBuffer); err != nil {
			err := fmt.Errorf("Failed to write fs obj %s:%s : %v", storeID, objID, err)
			return &appError{err, "", http.StatusInternalServerError}
		}
		fsBuf = fsBuf[44+objSize:]
	}
	// A fully consumed body means every record was well-formed.
	if len(fsBuf) == 0 {
		rsp.WriteHeader(http.StatusOK)
		return nil
	}
	msg := "Request body size invalid"
	return &appError{nil, msg, http.StatusBadRequest}
}
// checkFSCB reports which of the posted fs object IDs are missing on the
// server.
func checkFSCB(rsp http.ResponseWriter, r *http.Request) *appError {
	return postCheckExistCB(rsp, r, checkFSExist)
}
// checkBlockCB reports which of the posted block IDs are missing on the
// server.
func checkBlockCB(rsp http.ResponseWriter, r *http.Request) *appError {
	return postCheckExistCB(rsp, r, checkBlockExist)
}
// postCheckExistCB handles a batch existence check. The request body is a
// JSON array of object IDs; the response is a JSON array containing the IDs
// that do not yet exist on the server (fs objects or blocks, depending on
// existType). Invalid IDs are silently skipped.
func postCheckExistCB(rsp http.ResponseWriter, r *http.Request, existType checkExistType) *appError {
	repoID := mux.Vars(r)["repoid"]
	user, appErr := validateToken(r, repoID, false)
	if appErr != nil {
		return appErr
	}
	if appErr = checkPermission(repoID, user, "download", false); appErr != nil {
		return appErr
	}
	storeID, err := getRepoStoreID(repoID)
	if err != nil {
		err := fmt.Errorf("Failed to get repo store id by repo id %s: %v", repoID, err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	var objIDList []string
	if err := json.NewDecoder(r.Body).Decode(&objIDList); err != nil {
		return &appError{nil, err.Error(), http.StatusBadRequest}
	}
	var neededObjs []string
	var found bool
	for _, objID := range objIDList {
		if !utils.IsObjectIDValid(objID) {
			continue
		}
		if existType == checkFSExist {
			found, _ = fsmgr.Exists(storeID, objID)
		} else if existType == checkBlockExist {
			found = blockmgr.Exists(storeID, objID)
		}
		if !found {
			neededObjs = append(neededObjs, objID)
		}
	}
	data := []byte("[]")
	if neededObjs != nil {
		data, err = json.Marshal(neededObjs)
		if err != nil {
			err := fmt.Errorf("Failed to marshal json: %v", err)
			return &appError{err, "", http.StatusInternalServerError}
		}
	}
	rsp.Header().Set("Content-Length", strconv.Itoa(len(data)))
	rsp.WriteHeader(http.StatusOK)
	rsp.Write(data)
	return nil
}
// packFSCB bundles the requested fs objects into one response body. Each
// packed entry is the 40-byte object ID, a 4-byte big-endian length, and the
// raw object bytes. Packing stops early once maxObjectPackSize of payload
// has been accumulated.
func packFSCB(rsp http.ResponseWriter, r *http.Request) *appError {
	repoID := mux.Vars(r)["repoid"]
	user, appErr := validateToken(r, repoID, false)
	if appErr != nil {
		return appErr
	}
	if appErr = checkPermission(repoID, user, "download", false); appErr != nil {
		return appErr
	}
	storeID, err := getRepoStoreID(repoID)
	if err != nil {
		err := fmt.Errorf("Failed to get repo store id by repo id %s: %v", repoID, err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	var fsIDList []string
	if err := json.NewDecoder(r.Body).Decode(&fsIDList); err != nil {
		return &appError{nil, err.Error(), http.StatusBadRequest}
	}
	var packed bytes.Buffer
	totalSize := 0
	for _, fsID := range fsIDList {
		if !utils.IsObjectIDValid(fsID) {
			msg := fmt.Sprintf("Invalid fs id %s", fsID)
			return &appError{nil, msg, http.StatusBadRequest}
		}
		packed.WriteString(fsID)
		var obj bytes.Buffer
		if err := fsmgr.ReadRaw(storeID, fsID, &obj); err != nil {
			err := fmt.Errorf("Failed to read fs %s:%s: %v", storeID, fsID, err)
			return &appError{err, "", http.StatusInternalServerError}
		}
		var lenBuf [4]byte
		binary.BigEndian.PutUint32(lenBuf[:], uint32(obj.Len()))
		packed.Write(lenBuf[:])
		packed.Write(obj.Bytes())
		totalSize += obj.Len()
		if totalSize >= maxObjectPackSize {
			break
		}
	}
	rsp.Header().Set("Content-Length", strconv.Itoa(packed.Len()))
	rsp.WriteHeader(http.StatusOK)
	rsp.Write(packed.Bytes())
	return nil
}
// headCommitsMultiCB returns the master-branch head commit for a batch of
// repos. The request body is a JSON array of repo IDs; the response is a
// JSON object mapping repo ID to head commit ID.
func headCommitsMultiCB(rsp http.ResponseWriter, r *http.Request) *appError {
	var repoIDList []string
	if err := json.NewDecoder(r.Body).Decode(&repoIDList); err != nil {
		return &appError{err, "", http.StatusBadRequest}
	}
	if len(repoIDList) == 0 {
		return &appError{nil, "", http.StatusBadRequest}
	}
	// Validate every ID and build a "?,?,..." placeholder list so the query
	// is fully parameterized instead of assembled by string concatenation.
	placeholders := make([]string, len(repoIDList))
	args := make([]interface{}, len(repoIDList))
	for i, id := range repoIDList {
		if !utils.IsValidUUID(id) {
			return &appError{nil, "", http.StatusBadRequest}
		}
		placeholders[i] = "?"
		args[i] = id
	}
	sqlStr := fmt.Sprintf(
		"SELECT repo_id, commit_id FROM Branch WHERE name='master' AND "+
			"repo_id IN (%s) LOCK IN SHARE MODE",
		strings.Join(placeholders, ","))
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	rows, err := seafileDB.QueryContext(ctx, sqlStr, args...)
	if err != nil {
		err := fmt.Errorf("Failed to get commit id: %v", err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	defer rows.Close()
	commitIDMap := make(map[string]string)
	var repoID string
	var commitID string
	for rows.Next() {
		// Rows that fail to scan are skipped rather than aborting the batch.
		if err := rows.Scan(&repoID, &commitID); err == nil {
			commitIDMap[repoID] = commitID
		}
	}
	if err := rows.Err(); err != nil {
		err := fmt.Errorf("Failed to get commit id: %v", err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	data, err := json.Marshal(commitIDMap)
	if err != nil {
		err := fmt.Errorf("Failed to marshal json: %v", err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	rsp.Header().Set("Content-Length", strconv.Itoa(len(data)))
	rsp.WriteHeader(http.StatusOK)
	rsp.Write(data)
	return nil
}
// getCheckQuotaCB reports whether adding "delta" bytes to the repo would
// exceed the owner's quota. Responds 443 (seafHTTPResNoQuota) when over
// quota, 200 otherwise.
func getCheckQuotaCB(rsp http.ResponseWriter, r *http.Request) *appError {
	repoID := mux.Vars(r)["repoid"]
	if _, appErr := validateToken(r, repoID, false); appErr != nil {
		return appErr
	}
	delta := r.URL.Query().Get("delta")
	if delta == "" {
		return &appError{nil, "Invalid delta parameter", http.StatusBadRequest}
	}
	deltaNum, err := strconv.ParseInt(delta, 10, 64)
	if err != nil {
		return &appError{nil, "Invalid delta parameter", http.StatusBadRequest}
	}
	ret, err := checkQuota(repoID, deltaNum)
	if err != nil {
		err := fmt.Errorf("failed to check quota: %v", err)
		return &appError{err, "Internal error.\n", http.StatusInternalServerError}
	}
	if ret == 1 {
		return &appError{nil, "Out of quota.\n", seafHTTPResNoQuota}
	}
	return nil
}
// getJWTTokenCB issues a notification-server JWT for the repo/user, valid
// for 72 hours. Returns 404 when the notification server is disabled.
func getJWTTokenCB(rsp http.ResponseWriter, r *http.Request) *appError {
	repoID := mux.Vars(r)["repoid"]
	if !option.EnableNotification {
		return &appError{nil, "", http.StatusNotFound}
	}
	user, appErr := validateToken(r, repoID, false)
	if appErr != nil {
		return appErr
	}
	expire := time.Now().Add(72 * time.Hour).Unix()
	tokenString, err := utils.GenNotifJWTToken(repoID, user, expire)
	if err != nil {
		return &appError{err, "", http.StatusInternalServerError}
	}
	rsp.Write([]byte(fmt.Sprintf("{\"jwt_token\":\"%s\"}", tokenString)))
	return nil
}
// getFsObjIDCB queues the fs-id-list computation on calFsIdPool and blocks
// until the worker sends its result back on recvChan. The worker writes the
// response body itself; only the error (if any) is returned here.
func getFsObjIDCB(rsp http.ResponseWriter, r *http.Request) *appError {
	recvChan := make(chan *calResult)
	calFsIdPool.AddTask(recvChan, rsp, r)
	result := <-recvChan
	return result.err
}
// headCommitOperCB dispatches head-commit requests: GET fetches the head
// commit, PUT updates the branch.
func headCommitOperCB(rsp http.ResponseWriter, r *http.Request) *appError {
	switch r.Method {
	case http.MethodGet:
		return getHeadCommit(rsp, r)
	case http.MethodPut:
		return putUpdateBranchCB(rsp, r)
	default:
		return &appError{nil, "", http.StatusBadRequest}
	}
}
// commitOperCB dispatches commit requests: GET fetches a commit, PUT uploads
// one.
func commitOperCB(rsp http.ResponseWriter, r *http.Request) *appError {
	switch r.Method {
	case http.MethodGet:
		return getCommitInfo(rsp, r)
	case http.MethodPut:
		return putCommitCB(rsp, r)
	default:
		return &appError{nil, "", http.StatusBadRequest}
	}
}
// blockOperCB dispatches block requests: GET downloads a block, PUT uploads
// one.
func blockOperCB(rsp http.ResponseWriter, r *http.Request) *appError {
	switch r.Method {
	case http.MethodGet:
		return getBlockInfo(rsp, r)
	case http.MethodPut:
		return putSendBlockCB(rsp, r)
	default:
		return &appError{nil, "", http.StatusBadRequest}
	}
}
// putSendBlockCB stores one uploaded block from the request body and
// publishes a sync-file-upload statistics event.
func putSendBlockCB(rsp http.ResponseWriter, r *http.Request) *appError {
	routeVars := mux.Vars(r)
	repoID := routeVars["repoid"]
	blockID := routeVars["id"]
	user, appErr := validateToken(r, repoID, false)
	if appErr != nil {
		return appErr
	}
	if appErr = checkPermission(repoID, user, "upload", false); appErr != nil {
		return appErr
	}
	storeID, err := getRepoStoreID(repoID)
	if err != nil {
		err := fmt.Errorf("Failed to get repo store id by repo id %s: %v", repoID, err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	if err := blockmgr.Write(storeID, blockID, r.Body); err != nil {
		err := fmt.Errorf("Failed to write block %.8s:%s: %v", storeID, blockID, err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	sendStatisticMsg(storeID, user, "sync-file-upload", uint64(r.ContentLength))
	return nil
}
// getBlockInfo streams one block to the client and publishes a
// sync-file-download statistics event on success. Read failures after the
// headers are sent are logged (unless network-related) but not reported.
func getBlockInfo(rsp http.ResponseWriter, r *http.Request) *appError {
	routeVars := mux.Vars(r)
	repoID := routeVars["repoid"]
	blockID := routeVars["id"]
	user, appErr := validateToken(r, repoID, false)
	if appErr != nil {
		return appErr
	}
	if appErr = checkPermission(repoID, user, "download", false); appErr != nil {
		return appErr
	}
	storeID, err := getRepoStoreID(repoID)
	if err != nil {
		err := fmt.Errorf("Failed to get repo store id by repo id %s: %v", repoID, err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	blockSize, err := blockmgr.Stat(storeID, blockID)
	if err != nil {
		return &appError{err, "", http.StatusInternalServerError}
	}
	if blockSize <= 0 {
		err := fmt.Errorf("block %.8s:%s size invalid", storeID, blockID)
		return &appError{err, "", http.StatusInternalServerError}
	}
	rsp.Header().Set("Content-Length", fmt.Sprintf("%d", blockSize))
	if err := blockmgr.Read(storeID, blockID, rsp); err != nil {
		if !isNetworkErr(err) {
			log.Errorf("failed to read block %s: %v", blockID, err)
		}
		return nil
	}
	sendStatisticMsg(storeID, user, "sync-file-download", uint64(blockSize))
	return nil
}
// getRepoStoreID resolves the store ID for a repo: for a virtual repo this
// is its origin repo's ID, otherwise the repo's own ID. Results are cached
// in virtualRepoInfoCache with a sliding expiration.
func getRepoStoreID(repoID string) (string, error) {
	var storeID string
	if value, ok := virtualRepoInfoCache.Load(repoID); ok {
		if info, ok := value.(*virtualRepoInfo); ok {
			// An empty cached storeID marks a non-virtual repo.
			if info.storeID != "" {
				storeID = info.storeID
			} else {
				storeID = repoID
			}
			// Refresh the entry's expiration on every hit.
			info.expireTime = time.Now().Unix() + virtualRepoExpireTime
		}
	}
	if storeID != "" {
		return storeID, nil
	}
	// Cache miss: look the repo up in VirtualRepo.
	var vInfo virtualRepoInfo
	var rID, originRepoID sql.NullString
	sqlStr := "SELECT repo_id, origin_repo FROM VirtualRepo where repo_id = ?"
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	row := seafileDB.QueryRowContext(ctx, sqlStr, repoID)
	if err := row.Scan(&rID, &originRepoID); err != nil {
		if err == sql.ErrNoRows {
			// Not a virtual repo: cache it as storing under its own ID.
			vInfo.storeID = repoID
			vInfo.expireTime = time.Now().Unix() + virtualRepoExpireTime
			virtualRepoInfoCache.Store(repoID, &vInfo)
			return repoID, nil
		}
		return "", err
	}
	// NULL columns yield an empty store ID with no error; callers should
	// treat that as "no usable store" — NOTE(review): looks intentional but
	// worth confirming against callers.
	if !rID.Valid || !originRepoID.Valid {
		return "", nil
	}
	vInfo.storeID = originRepoID.String
	vInfo.expireTime = time.Now().Unix() + virtualRepoExpireTime
	virtualRepoInfoCache.Store(repoID, &vInfo)
	return originRepoID.String, nil
}
// sendStatisticMsg publishes a traffic-statistics event for the given repo,
// user and operation (e.g. "sync-file-upload"), with the byte count moved.
func sendStatisticMsg(repoID, user, operation string, bytes uint64) {
	rData := &statsEventData{operation, user, repoID, bytes}
	publishStatsEvent(rData)
}
// publishStatsEvent serializes a statistics event to JSON and publishes it
// on the stats channel; failures are logged and otherwise ignored.
func publishStatsEvent(rData *statsEventData) {
	payload := map[string]interface{}{
		"msg_type":  rData.eType,
		"user_name": rData.user,
		"repo_id":   rData.repoID,
		"bytes":     rData.bytes,
	}
	jsonData, err := json.Marshal(payload)
	if err != nil {
		log.Warnf("Failed to publish event: %v", err)
		return
	}
	if _, err := rpcclient.Call("publish_event", seafileServerChannelStats, string(jsonData)); err != nil {
		log.Warnf("Failed to publish event: %v", err)
	}
}
// saveLastGCID records the repo's current GC ID under the client's sync
// token, so a later branch update can detect whether GC ran in between.
func saveLastGCID(repoID, token string) error {
	repo := repomgr.Get(repoID)
	if repo == nil {
		return fmt.Errorf("failed to get repo: %s", repoID)
	}
	// GC state lives in the store (origin repo for virtual repos).
	gcID, err := repomgr.GetCurrentGCID(repo.StoreID)
	if err != nil {
		return err
	}
	return repomgr.SetLastGCID(repoID, token, gcID)
}
// putCommitCB stores one uploaded commit object after verifying that its
// embedded repo ID matches the URL, then records the current GC ID for the
// client's token.
func putCommitCB(rsp http.ResponseWriter, r *http.Request) *appError {
	routeVars := mux.Vars(r)
	repoID := routeVars["repoid"]
	commitID := routeVars["id"]
	user, appErr := validateToken(r, repoID, false)
	if appErr != nil {
		return appErr
	}
	if appErr = checkPermission(repoID, user, "upload", true); appErr != nil {
		return appErr
	}
	data, err := io.ReadAll(r.Body)
	if err != nil {
		return &appError{nil, err.Error(), http.StatusBadRequest}
	}
	commit := new(commitmgr.Commit)
	if err := commit.FromData(data); err != nil {
		return &appError{nil, err.Error(), http.StatusBadRequest}
	}
	if commit.RepoID != repoID {
		msg := "The repo id in commit does not match current repo id"
		return &appError{nil, msg, http.StatusBadRequest}
	}
	if err := commitmgr.Save(commit); err != nil {
		err := fmt.Errorf("Failed to add commit %s: %v", commitID, err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	token := r.Header.Get("Seafile-Repo-Token")
	if token == "" {
		token = utils.GetAuthorizationToken(r.Header)
	}
	if err := saveLastGCID(repoID, token); err != nil {
		err := fmt.Errorf("Failed to save gc id: %v", err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	return nil
}
// getCommitInfo serves the raw bytes of one commit object, or 404 when it
// does not exist.
func getCommitInfo(rsp http.ResponseWriter, r *http.Request) *appError {
	routeVars := mux.Vars(r)
	repoID := routeVars["repoid"]
	commitID := routeVars["id"]
	user, appErr := validateToken(r, repoID, false)
	if appErr != nil {
		return appErr
	}
	if appErr = checkPermission(repoID, user, "download", false); appErr != nil {
		return appErr
	}
	if exists, _ := commitmgr.Exists(repoID, commitID); !exists {
		return &appError{nil, "", http.StatusNotFound}
	}
	var buf bytes.Buffer
	if err := commitmgr.ReadRaw(repoID, commitID, &buf); err != nil {
		err := fmt.Errorf("Failed to read commit %s:%s: %v", repoID, commitID, err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	rsp.Header().Set("Content-Length", strconv.Itoa(buf.Len()))
	rsp.WriteHeader(http.StatusOK)
	rsp.Write(buf.Bytes())
	return nil
}
// putUpdateBranchCB moves the repo's master branch to the commit named in
// the "head" query parameter, fast-forwarding or merging with the current
// head as needed. Enforces quota, optional client-block verification, and
// GC-conflict detection.
func putUpdateBranchCB(rsp http.ResponseWriter, r *http.Request) *appError {
	queries := r.URL.Query()
	newCommitID := queries.Get("head")
	if newCommitID == "" || !utils.IsObjectIDValid(newCommitID) {
		msg := fmt.Sprintf("commit id %s is invalid", newCommitID)
		return &appError{nil, msg, http.StatusBadRequest}
	}
	vars := mux.Vars(r)
	repoID := vars["repoid"]
	user, appErr := validateToken(r, repoID, false)
	if appErr != nil {
		return appErr
	}
	// Only an explicit 403 aborts the update here; other permission-check
	// outcomes deliberately fall through.
	appErr = checkPermission(repoID, user, "upload", false)
	if appErr != nil && appErr.Code == http.StatusForbidden {
		return appErr
	}
	repo := repomgr.Get(repoID)
	if repo == nil {
		err := fmt.Errorf("Repo %s is missing or corrupted", repoID)
		return &appError{err, "", http.StatusInternalServerError}
	}
	newCommit, err := commitmgr.Load(repoID, newCommitID)
	if err != nil {
		err := fmt.Errorf("Failed to get commit %s for repo %s", newCommitID, repoID)
		return &appError{err, "", http.StatusInternalServerError}
	}
	// The merge base is the new commit's parent.
	base, err := commitmgr.Load(repoID, newCommit.ParentID.String)
	if err != nil {
		err := fmt.Errorf("Failed to get commit %s for repo %s", newCommit.ParentID.String, repoID)
		return &appError{err, "", http.StatusInternalServerError}
	}
	// Reject commits that introduce ignored path components (e.g. "..").
	if includeInvalidPath(base, newCommit) {
		msg := "Dir or file name is .."
		return &appError{nil, msg, http.StatusBadRequest}
	}
	ret, err := checkQuota(repoID, 0)
	if err != nil {
		err := fmt.Errorf("Failed to check quota: %v", err)
		return &appError{err, "", http.StatusInternalServerError}
	}
	if ret == 1 {
		msg := "Out of quota.\n"
		return &appError{nil, msg, seafHTTPResNoQuota}
	}
	// Optionally verify that every block referenced by the new commit
	// exists; the response body lists files with missing blocks.
	if option.VerifyClientBlocks {
		if body, err := checkBlocks(r.Context(), repo, base, newCommit); err != nil {
			return &appError{nil, body, seafHTTPResBlockMissing}
		}
	}
	token := r.Header.Get("Seafile-Repo-Token")
	if token == "" {
		token = utils.GetAuthorizationToken(r.Header)
	}
	if err := fastForwardOrMerge(user, token, repo, base, newCommit); err != nil {
		if errors.Is(err, ErrGCConflict) {
			return &appError{nil, "GC Conflict.\n", http.StatusConflict}
		} else {
			err := fmt.Errorf("Fast forward merge for repo %s is failed: %v", repoID, err)
			return &appError{err, "", http.StatusInternalServerError}
		}
	}
	// Fire-and-forget follow-ups: propagate into virtual repos and
	// recompute the repo size.
	go mergeVirtualRepoPool.AddTask(repoID, "")
	go updateSizePool.AddTask(repoID)
	rsp.WriteHeader(http.StatusOK)
	return nil
}
// checkBlockAux carries state for the block-existence diff in checkBlocks.
type checkBlockAux struct {
	// storeID is the store used for block lookups.
	storeID string
	// version is the repo version.
	version int
	// fileList collects names of files with at least one missing block.
	fileList []string
}
// checkBlocks diffs base against remote and verifies that every block
// referenced by changed files exists in the repo's store. On missing blocks
// it returns a JSON array of the affected file names together with a
// non-nil error.
func checkBlocks(ctx context.Context, repo *repomgr.Repo, base, remote *commitmgr.Commit) (string, error) {
	aux := &checkBlockAux{
		storeID: repo.StoreID,
		version: repo.Version,
	}
	opt := &diff.DiffOptions{
		FileCB: checkFileBlocks,
		DirCB:  checkDirCB,
		Ctx:    ctx,
		RepoID: repo.StoreID}
	opt.Data = aux
	if err := diff.DiffTrees([]string{base.RootID, remote.RootID}, opt); err != nil {
		return "", err
	}
	if len(aux.fileList) == 0 {
		return "", nil
	}
	body, _ := json.Marshal(aux.fileList)
	return string(body), fmt.Errorf("block is missing")
}
// checkFileBlocks is the file diff callback for checkBlocks: for each
// changed file on the new side, it records the file name if any of its
// blocks is missing from the store.
func checkFileBlocks(ctx context.Context, baseDir string, files []*fsmgr.SeafDirent, data interface{}) error {
	select {
	case <-ctx.Done():
		return context.Canceled
	default:
	}
	aux, ok := data.(*checkBlockAux)
	if !ok {
		return fmt.Errorf("failed to assert results")
	}
	oldFile, newFile := files[0], files[1]
	// Nothing to verify when the new side is absent, empty, or unchanged.
	if newFile == nil || newFile.ID == emptySHA1 || (oldFile != nil && oldFile.ID == newFile.ID) {
		return nil
	}
	file, err := fsmgr.GetSeafile(aux.storeID, newFile.ID)
	if err != nil {
		return err
	}
	for _, blkID := range file.BlkIDs {
		if !blockmgr.Exists(aux.storeID, blkID) {
			aux.fileList = append(aux.fileList, newFile.Name)
			return nil
		}
	}
	return nil
}
// checkDirCB is the directory diff callback for checkBlocks: it prunes the
// walk whenever the new side is unchanged, removed, or empty.
func checkDirCB(ctx context.Context, baseDir string, dirs []*fsmgr.SeafDirent, data interface{}, recurse *bool) error {
	select {
	case <-ctx.Done():
		return context.Canceled
	default:
	}
	oldDir, newDir := dirs[0], dirs[1]
	switch {
	case oldDir == nil:
		// Newly added directory: descend only when it is non-empty.
		*recurse = newDir.ID != diff.EmptySha1
	case newDir == nil:
		// Directory removed on the new side: nothing below to check.
		*recurse = false
	case oldDir.ID == newDir.ID || newDir.ID == diff.EmptySha1:
		// Unchanged or emptied: nothing below to check.
		*recurse = false
	}
	return nil
}
// includeInvalidPath reports whether the diff between the two commits
// touches any path that shouldIgnore rejects (e.g. ".." components). Diff
// failures are logged and treated as "no invalid path".
func includeInvalidPath(baseCommit, newCommit *commitmgr.Commit) bool {
	var entries []*diff.DiffEntry
	if err := diff.DiffCommits(baseCommit, newCommit, &entries, true); err != nil {
		log.Infof("Failed to diff commits: %v", err)
		return false
	}
	for _, e := range entries {
		// Renames carry the relevant path in NewName.
		name := e.Name
		if e.NewName != "" {
			name = e.NewName
		}
		if shouldIgnore(name) {
			return true
		}
	}
	return false
}
// getHeadCommit returns the repo's master-branch head commit ID as JSON.
// Database errors are reported to the client as {"is_corrupted": 1} with
// HTTP 200 rather than as a server error, so the client treats the repo as
// corrupted instead of retrying.
func getHeadCommit(rsp http.ResponseWriter, r *http.Request) *appError {
	vars := mux.Vars(r)
	repoID := vars["repoid"]
	sqlStr := "SELECT EXISTS(SELECT 1 FROM Repo WHERE repo_id=?)"
	var exists bool
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	row := seafileDB.QueryRowContext(ctx, sqlStr, repoID)
	if err := row.Scan(&exists); err != nil {
		if err != sql.ErrNoRows {
			log.Errorf("DB error when check repo %s existence: %v", repoID, err)
			msg := `{"is_corrupted": 1}`
			rsp.WriteHeader(http.StatusOK)
			rsp.Write([]byte(msg))
			return nil
		}
	}
	if !exists {
		// Repo row gone: tell the client the repo was deleted.
		return &appError{nil, "", seafHTTPResRepoDeleted}
	}
	if _, err := validateToken(r, repoID, false); err != nil {
		return err
	}
	var commitID string
	sqlStr = "SELECT commit_id FROM Branch WHERE name='master' AND repo_id=?"
	row = seafileDB.QueryRowContext(ctx, sqlStr, repoID)
	if err := row.Scan(&commitID); err != nil {
		if err != sql.ErrNoRows {
			log.Errorf("DB error when get branch master: %v", err)
			msg := `{"is_corrupted": 1}`
			rsp.WriteHeader(http.StatusOK)
			rsp.Write([]byte(msg))
			return nil
		}
	}
	// No master branch row (ErrNoRows) leaves commitID empty.
	if commitID == "" {
		return &appError{nil, "", http.StatusBadRequest}
	}
	msg := fmt.Sprintf("{\"is_corrupted\": 0, \"head_commit_id\": \"%s\"}", commitID)
	rsp.WriteHeader(http.StatusOK)
	rsp.Write([]byte(msg))
	return nil
}
// checkPermission checks that user may perform op ("upload" or "download")
// on the repo, caching positive results in permCache keyed by
// repoID:user:op. Entry expiry is handled by the periodic cache sweeper,
// not here. Returns nil on success, a 403 appError otherwise.
func checkPermission(repoID, user, op string, skipCache bool) *appError {
	var info *permInfo
	if !skipCache {
		if value, ok := permCache.Load(fmt.Sprintf("%s:%s:%s", repoID, user, op)); ok {
			info = value.(*permInfo)
		}
	}
	// Any cached entry counts as permission granted.
	if info != nil {
		return nil
	}
	permCache.Delete(fmt.Sprintf("%s:%s:%s", repoID, user, op))
	if op == "upload" {
		// Uploads are refused while the repo is in a non-normal status
		// (e.g. read-only); -1 means the status is unknown/not set.
		status, err := repomgr.GetRepoStatus(repoID)
		if err != nil {
			msg := fmt.Sprintf("Failed to get repo status by repo id %s: %v", repoID, err)
			return &appError{nil, msg, http.StatusForbidden}
		}
		if status != repomgr.RepoStatusNormal && status != -1 {
			return &appError{nil, "", http.StatusForbidden}
		}
	}
	perm := share.CheckPerm(repoID, user)
	if perm != "" {
		// Read-only shares cannot upload.
		if perm == "r" && op == "upload" {
			return &appError{nil, "", http.StatusForbidden}
		}
		info = new(permInfo)
		info.perm = perm
		info.expireTime = time.Now().Unix() + permExpireTime
		permCache.Store(fmt.Sprintf("%s:%s:%s", repoID, user, op), info)
		return nil
	}
	return &appError{nil, "", http.StatusForbidden}
}
// validateToken resolves the client's sync token (from the
// Seafile-Repo-Token header, falling back to Authorization) to the owning
// user's email, caching the mapping in tokenCache. The cached entry also
// pins the repo ID so a token cannot be replayed against another repo.
// Returns the email on success.
func validateToken(r *http.Request, repoID string, skipCache bool) (string, *appError) {
	token := r.Header.Get("Seafile-Repo-Token")
	if token == "" {
		token = utils.GetAuthorizationToken(r.Header)
		if token == "" {
			msg := "token is null"
			return "", &appError{nil, msg, http.StatusBadRequest}
		}
	}
	if !skipCache {
		if value, ok := tokenCache.Load(token); ok {
			if info, ok := value.(*tokenInfo); ok {
				// A cached token bound to a different repo is rejected.
				if info.repoID != repoID {
					msg := "Invalid token"
					return "", &appError{nil, msg, http.StatusForbidden}
				}
				return info.email, nil
			}
		}
	}
	email, err := repomgr.GetEmailByToken(repoID, token)
	if err != nil {
		log.Errorf("Failed to get email by token %s: %v", token, err)
		tokenCache.Delete(token)
		return email, &appError{err, "", http.StatusInternalServerError}
	}
	if email == "" {
		// Unknown token: drop any stale cache entry and refuse.
		tokenCache.Delete(token)
		msg := fmt.Sprintf("Failed to get email by token %s", token)
		return email, &appError{nil, msg, http.StatusForbidden}
	}
	info := new(tokenInfo)
	info.email = email
	info.expireTime = time.Now().Unix() + tokenExpireTime
	info.repoID = repoID
	tokenCache.Store(token, info)
	return email, nil
}
// validateClientVer checks that clientVer has the form "X.Y.Z" with three
// integer components. It returns http.StatusOK when valid and
// http.StatusBadRequest otherwise.
func validateClientVer(clientVer string) int {
	parts := strings.Split(clientVer, ".")
	if len(parts) != 3 {
		return http.StatusBadRequest
	}
	for _, part := range parts {
		if _, err := strconv.Atoi(part); err != nil {
			return http.StatusBadRequest
		}
	}
	return http.StatusOK
}
func getClientIPAddr(r *http.Request) string {
xForwardedFor := r.Header.Get("X-Forwarded-For")
addr := strings.TrimSpace(strings.Split(xForwardedFor, ",")[0])
ip := net.ParseIP(addr)
if ip != nil {
return ip.String()
}
addr = strings.TrimSpace(r.Header.Get("X-Real-Ip"))
ip = net.ParseIP(addr)
if ip != nil {
return ip.String()
}
if addr, _, err := net.SplitHostPort(strings.TrimSpace(r.RemoteAddr)); err == nil {
ip = net.ParseIP(addr)
if ip != nil {
return ip.String()
}
}
return ""
}
func onRepoOper(eType, repoID, user, ip, clientName string) {
rData := new(repoEventData)
vInfo, err := repomgr.GetVirtualRepoInfo(repoID)
if err != nil {
log.Errorf("Failed to get virtual repo info by repo id %s: %v", repoID, err)
return
}
if vInfo != nil {
rData.repoID = vInfo.OriginRepoID
rData.path = vInfo.Path
} else {
rData.repoID = repoID
}
rData.eType = eType
rData.user = user
rData.ip = ip
rData.clientName = clientName
publishRepoEvent(rData)
}
// publishRepoEvent serializes a repo event to JSON and publishes it on the
// event channel; failures are logged and otherwise ignored. An empty path
// defaults to "/".
func publishRepoEvent(rData *repoEventData) {
	if rData.path == "" {
		rData.path = "/"
	}
	payload := map[string]interface{}{
		"msg_type":   rData.eType,
		"user_name":  rData.user,
		"ip":         rData.ip,
		"user_agent": rData.clientName,
		"repo_id":    rData.repoID,
		"file_path":  rData.path,
	}
	jsonData, err := json.Marshal(payload)
	if err != nil {
		log.Warnf("Failed to publish event: %v", err)
		return
	}
	if _, err := rpcclient.Call("publish_event", seafileServerChannelEvent, string(jsonData)); err != nil {
		log.Warnf("Failed to publish event: %v", err)
	}
}
// publishUpdateEvent publishes a "repo-update" event carrying the repo's
// new head commit ID; failures are logged and otherwise ignored.
func publishUpdateEvent(repoID string, commitID string) {
	payload := map[string]interface{}{
		"msg_type":  "repo-update",
		"repo_id":   repoID,
		"commit_id": commitID,
	}
	jsonData, err := json.Marshal(payload)
	if err != nil {
		log.Warnf("Failed to publish event: %v", err)
		return
	}
	if _, err := rpcclient.Call("publish_event", seafileServerChannelEvent, string(jsonData)); err != nil {
		log.Warnf("Failed to publish event: %v", err)
	}
}
func removeSyncAPIExpireCache() {
deleteTokens := func(key interface{}, value interface{}) bool {
if info, ok := value.(*tokenInfo); ok {
if info.expireTime <= time.Now().Unix() {
tokenCache.Delete(key)
}
}
return true
}
deletePerms := func(key interface{}, value interface{}) bool {
if info, ok := value.(*permInfo); ok {
if info.expireTime <= time.Now().Unix() {
permCache.Delete(key)
}
}
return true
}
deleteVirtualRepoInfo := func(key interface{}, value interface{}) bool {
if info, ok := value.(*virtualRepoInfo); ok {
if info.expireTime <= time.Now().Unix() {
virtualRepoInfoCache.Delete(key)
}
}
return true
}
tokenCache.Range(deleteTokens)
permCache.Range(deletePerms)
virtualRepoInfoCache.Range(deleteVirtualRepoInfo)
}
// collectFsInfo accumulates fs object IDs during a diff walk.
type collectFsInfo struct {
	// startTime is when the walk began (unix seconds), used for timeouts.
	startTime int64
	// isTimeout is set when the walk exceeded FsIdListRequestTimeout.
	isTimeout bool
	// results collects the changed fs object IDs.
	results []interface{}
}

// ErrTimeout is returned when computing the fs id list takes too long.
var ErrTimeout = fmt.Errorf("get fs id list timeout")
// calculateSendObjectList diffs the server head against the client head and
// returns the list of fs object IDs the client is missing. When dirOnly is
// true only directory IDs are collected. An empty clientHead diffs against
// the empty tree.
func calculateSendObjectList(ctx context.Context, repo *repomgr.Repo, serverHead string, clientHead string, dirOnly bool) ([]interface{}, error) {
	masterHead, err := commitmgr.Load(repo.ID, serverHead)
	if err != nil {
		return nil, fmt.Errorf("Failed to load server head commit %s:%s: %v", repo.ID, serverHead, err)
	}
	remoteHeadRoot := emptySHA1
	if clientHead != "" {
		remoteHead, err := commitmgr.Load(repo.ID, clientHead)
		if err != nil {
			return nil, fmt.Errorf("Failed to load remote head commit %s:%s: %v", repo.ID, clientHead, err)
		}
		remoteHeadRoot = remoteHead.RootID
	}
	info := &collectFsInfo{startTime: time.Now().Unix()}
	if remoteHeadRoot != masterHead.RootID && masterHead.RootID != emptySHA1 {
		info.results = append(info.results, masterHead.RootID)
	}
	fileCB := collectFileIDs
	if dirOnly {
		fileCB = collectFileIDsNOp
	}
	opt := &diff.DiffOptions{
		FileCB: fileCB,
		DirCB:  collectDirIDs,
		Ctx:    ctx,
		RepoID: repo.StoreID}
	opt.Data = info
	trees := []string{masterHead.RootID, remoteHeadRoot}
	if err := diff.DiffTrees(trees, opt); err != nil {
		if info.isTimeout {
			return nil, ErrTimeout
		}
		return nil, err
	}
	return info.results, nil
}
// collectFileIDs is the file diff callback for calculateSendObjectList: it
// records the new-side file ID whenever it is present, non-empty, and
// different from the old side.
func collectFileIDs(ctx context.Context, baseDir string, files []*fsmgr.SeafDirent, data interface{}) error {
	select {
	case <-ctx.Done():
		return context.Canceled
	default:
	}
	info, ok := data.(*collectFsInfo)
	if !ok {
		return fmt.Errorf("failed to assert results")
	}
	newFile, oldFile := files[0], files[1]
	if newFile == nil || newFile.ID == emptySHA1 {
		return nil
	}
	if oldFile != nil && oldFile.ID == newFile.ID {
		return nil
	}
	info.results = append(info.results, newFile.ID)
	return nil
}
// collectFileIDsNOp is a no-op file callback used when only directory IDs
// are wanted (dirOnly mode).
func collectFileIDsNOp(ctx context.Context, baseDir string, files []*fsmgr.SeafDirent, data interface{}) error {
	return nil
}
// collectDirIDs is the directory diff callback for calculateSendObjectList:
// it records changed, non-empty new-side directory IDs and aborts the walk
// with ErrTimeout once FsIdListRequestTimeout (when positive) is exceeded.
func collectDirIDs(ctx context.Context, baseDir string, dirs []*fsmgr.SeafDirent, data interface{}, recurse *bool) error {
	select {
	case <-ctx.Done():
		return context.Canceled
	default:
	}
	info, ok := data.(*collectFsInfo)
	if !ok {
		return fmt.Errorf("failed to assert fs info")
	}
	newDir, oldDir := dirs[0], dirs[1]
	changed := newDir != nil && newDir.ID != emptySHA1 &&
		(oldDir == nil || oldDir.ID != newDir.ID)
	if changed {
		info.results = append(info.results, newDir.ID)
	}
	if option.FsIdListRequestTimeout > 0 &&
		time.Now().Unix()-info.startTime > option.FsIdListRequestTimeout {
		info.isTimeout = true
		return ErrTimeout
	}
	return nil
}
================================================
FILE: fileserver/utils/dup2.go
================================================
//go:build !(linux && arm64)
package utils
import (
"syscall"
)
// Dup duplicates file descriptor "from" onto "to" using dup2 (all platforms
// except linux/arm64, which lacks the dup2 syscall).
func Dup(from, to int) error {
	return syscall.Dup2(from, to)
}
================================================
FILE: fileserver/utils/dup3.go
================================================
//go:build linux && arm64
package utils
import (
"syscall"
)
// Dup duplicates file descriptor "from" onto "to" using dup3 with no flags
// (linux/arm64 has no dup2 syscall).
func Dup(from, to int) error {
	return syscall.Dup3(from, to, 0)
}
================================================
FILE: fileserver/utils/http.go
================================================
package utils
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"time"
)
func GetAuthorizationToken(h http.Header) string {
auth := h.Get("Authorization")
splitResult := strings.Split(auth, " ")
if len(splitResult) > 1 {
return splitResult[1]
}
return ""
}
// HttpCommon performs an HTTP request with a 45-second timeout, forcing
// JSON Content-Type and a Seafile Server User-Agent. On a non-200 response
// it returns the status, the parsed error message body, and an error. The
// header map may be nil.
func HttpCommon(method, url string, header map[string][]string, reader io.Reader) (int, []byte, error) {
	// Guard against a nil header map: assigning into a nil map panics.
	if header == nil {
		header = make(map[string][]string)
	}
	header["Content-Type"] = []string{"application/json"}
	header["User-Agent"] = []string{"Seafile Server"}
	ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
	defer cancel()
	req, err := http.NewRequestWithContext(ctx, method, url, reader)
	if err != nil {
		return http.StatusInternalServerError, nil, err
	}
	req.Header = header
	rsp, err := http.DefaultClient.Do(req)
	if err != nil {
		return http.StatusInternalServerError, nil, err
	}
	defer rsp.Body.Close()
	if rsp.StatusCode != http.StatusOK {
		errMsg := parseErrorMessage(rsp.Body)
		return rsp.StatusCode, errMsg, fmt.Errorf("bad response %d for %s", rsp.StatusCode, url)
	}
	body, err := io.ReadAll(rsp.Body)
	if err != nil {
		return rsp.StatusCode, nil, err
	}
	return http.StatusOK, body, nil
}
func parseErrorMessage(r io.Reader) []byte {
body, err := io.ReadAll(r)
if err != nil {
return nil
}
var objs map[string]string
err = json.Unmarshal(body, &objs)
if err != nil {
return body
}
errMsg, ok := objs["error_msg"]
if ok {
return []byte(errMsg)
}
return body
}
================================================
FILE: fileserver/utils/utils.go
================================================
package utils
import (
"fmt"
"time"
jwt "github.com/golang-jwt/jwt/v5"
"github.com/google/uuid"
"github.com/haiwen/seafile-server/fileserver/option"
)
// IsValidUUID reports whether u parses as a UUID.
func IsValidUUID(u string) bool {
	_, err := uuid.Parse(u)
	return err == nil
}
// IsObjectIDValid reports whether objID is a 40-character lowercase
// hexadecimal string (the textual form of a SHA-1 object ID).
func IsObjectIDValid(objID string) bool {
	if len(objID) != 40 {
		return false
	}
	for _, c := range []byte(objID) {
		isHexDigit := (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f')
		if !isHexDigit {
			return false
		}
	}
	return true
}
// SeahubClaims is the JWT claim set attached to internal requests sent to
// Seahub.
type SeahubClaims struct {
	// Exp is the token expiration time as a unix timestamp.
	Exp int64 `json:"exp"`
	// IsInternal marks the request as server-to-server.
	IsInternal bool `json:"is_internal"`
	jwt.RegisteredClaims
}

// Valid implements the jwt claims interface; no validation is performed
// here.
func (*SeahubClaims) Valid() error {
	return nil
}

// GenSeahubJWTToken signs a short-lived (300 s) HS256 token with the
// configured JWT private key, for authenticating calls to Seahub.
func GenSeahubJWTToken() (string, error) {
	claims := new(SeahubClaims)
	claims.Exp = time.Now().Add(time.Second * 300).Unix()
	claims.IsInternal = true
	token := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"), claims)
	tokenString, err := token.SignedString([]byte(option.JWTPrivateKey))
	if err != nil {
		err := fmt.Errorf("failed to gen seahub jwt token: %w", err)
		return "", err
	}
	return tokenString, nil
}
type MyClaims struct {
Exp int64 `json:"exp"`
RepoID string `json:"repo_id"`
UserName string `json:"username"`
jwt.RegisteredClaims
}
func (*MyClaims) Valid() error {
return nil
}
// GenNotifJWTToken signs an HS256 JWT carrying repoID, user and the
// caller-supplied expiry timestamp, using option.JWTPrivateKey as key.
func GenNotifJWTToken(repoID, user string, exp int64) (string, error) {
claims := new(MyClaims)
claims.Exp = exp
claims.RepoID = repoID
claims.UserName = user
token := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"), claims)
tokenString, err := token.SignedString([]byte(option.JWTPrivateKey))
if err != nil {
err := fmt.Errorf("failed to gen jwt token for repo %s: %w", repoID, err)
return "", err
}
return tokenString, nil
}
================================================
FILE: fileserver/virtual_repo.go
================================================
package main
import (
"errors"
"fmt"
"path/filepath"
"strings"
"sync"
"time"
"math/rand"
"github.com/haiwen/seafile-server/fileserver/commitmgr"
"github.com/haiwen/seafile-server/fileserver/diff"
"github.com/haiwen/seafile-server/fileserver/fsmgr"
"github.com/haiwen/seafile-server/fileserver/option"
"github.com/haiwen/seafile-server/fileserver/repomgr"
"github.com/haiwen/seafile-server/fileserver/workerpool"
log "github.com/sirupsen/logrus"
)
const mergeVirtualRepoWorkerNumber = 5
var mergeVirtualRepoPool *workerpool.WorkPool
var runningRepo = make(map[string]struct{})
var runningRepoMutex sync.Mutex
// virtualRepoInit creates the worker pool used to merge virtual repos.
func virtualRepoInit() {
mergeVirtualRepoPool = workerpool.CreateWorkerPool(mergeVirtualRepo, mergeVirtualRepoWorkerNumber)
}
// mergeVirtualRepo is the worker-pool callback that keeps virtual repos
// and their origin repos in sync.
//
// args[0] is the repo ID to handle. If it is itself a virtual repo, only
// that repo is merged; otherwise every virtual repo derived from it is
// merged, except the optional args[1] repo ID, which callers pass to
// avoid re-merging the repo that triggered this task. A size-update task
// is queued afterwards.
func mergeVirtualRepo(args ...interface{}) error {
	if len(args) < 1 {
		return nil
	}
	repoID := args[0].(string)
	virtual, err := repomgr.IsVirtualRepo(repoID)
	if err != nil {
		return err
	}
	if virtual {
		if mergeOneRepo(repoID) {
			go updateSizePool.AddTask(repoID)
		}
		return nil
	}
	excludeRepo := ""
	if len(args) > 1 {
		excludeRepo = args[1].(string)
	}
	vRepos, _ := repomgr.GetVirtualRepoIDsByOrigin(repoID)
	for _, id := range vRepos {
		if id == excludeRepo {
			continue
		}
		mergeOneRepo(id)
	}
	go updateSizePool.AddTask(repoID)
	return nil
}

// mergeOneRepo merges a single repo unless a merge for it is already in
// flight; in that case the task is re-queued so the merge is not lost.
// It reports whether the merge was actually attempted. (This factors out
// the lock/check/merge/unlock sequence that was duplicated in
// mergeVirtualRepo.)
func mergeOneRepo(id string) bool {
	runningRepoMutex.Lock()
	if _, ok := runningRepo[id]; ok {
		log.Debugf("a task for repo %s is already running", id)
		go mergeVirtualRepoPool.AddTask(id)
		runningRepoMutex.Unlock()
		return false
	}
	runningRepo[id] = struct{}{}
	runningRepoMutex.Unlock()

	if err := mergeRepo(id); err != nil {
		log.Errorf("%v", err)
	}

	runningRepoMutex.Lock()
	delete(runningRepo, id)
	runningRepoMutex.Unlock()
	return true
}
// mergeRepo merges the head of virtual repo repoID with the directory it
// mirrors in its origin repo. vInfo.BaseCommitID gives the common base;
// depending on which side changed since then, the virtual root, the
// origin directory, or both (via a 3-way tree merge) are updated.
func mergeRepo(repoID string) error {
repo := repomgr.Get(repoID)
if repo == nil {
err := fmt.Errorf("failed to get virt repo %.10s", repoID)
return err
}
vInfo := repo.VirtualInfo
// Not a virtual repo: nothing to merge.
if vInfo == nil {
return nil
}
origRepo := repomgr.Get(vInfo.OriginRepoID)
if origRepo == nil {
err := fmt.Errorf("failed to get orig repo %.10s", repoID)
return err
}
head, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
if err != nil {
err := fmt.Errorf("failed to get commit %s:%.8s", repo.ID, repo.HeadCommitID)
return err
}
origHead, err := commitmgr.Load(origRepo.ID, origRepo.HeadCommitID)
if err != nil {
err := fmt.Errorf("merge repo %.8s failed: failed to get origin repo commit %s:%.8s", repoID, origRepo.ID, origRepo.HeadCommitID)
return err
}
var origRoot string
origRoot, err = fsmgr.GetSeafdirIDByPath(origRepo.StoreID, origHead.RootID, vInfo.Path)
if err != nil && !errors.Is(err, fsmgr.ErrPathNoExist) {
err := fmt.Errorf("merge repo %.10s failed: failed to get seafdir id by path in origin repo %.10s: %v", repoID, origRepo.StoreID, err)
return err
}
// The mirrored dir is gone from the origin head: it may have been
// renamed. Try to recover the new path; otherwise silently give up.
if origRoot == "" {
newPath, _ := handleMissingVirtualRepo(origRepo, origHead, vInfo)
if newPath != "" {
origRoot, _ = fsmgr.GetSeafdirIDByPath(origRepo.StoreID, origHead.RootID, newPath)
}
if origRoot == "" {
return nil
}
}
base, err := commitmgr.Load(origRepo.ID, vInfo.BaseCommitID)
if err != nil {
err := fmt.Errorf("merge repo %.8s failed: failed to get origin repo commit %s:%.8s", repoID, origRepo.ID, vInfo.BaseCommitID)
return err
}
root := head.RootID
baseRoot, _ := fsmgr.GetSeafdirIDByPath(origRepo.StoreID, base.RootID, vInfo.Path)
if baseRoot == "" {
err := fmt.Errorf("merge repo %.10s failed: cannot find seafdir for origin repo %.10s path %s", repoID, vInfo.OriginRepoID, vInfo.Path)
return err
}
if root == origRoot {
// Both sides identical: already in sync, nothing to do.
} else if baseRoot == root {
// Only the origin changed: fast-forward the virtual repo to it.
_, err := updateDir(repoID, "/", origRoot, origHead.CreatorName, head.CommitID)
if err != nil {
err := fmt.Errorf("failed to update root of virtual repo %.10s", repoID)
return err
}
repomgr.SetVirtualRepoBaseCommitPath(repo.ID, origRepo.HeadCommitID, vInfo.Path)
} else if baseRoot == origRoot {
// Only the virtual repo changed: push its root into the origin,
// then propagate to the origin's other virtual repos.
newBaseCommit, err := updateDir(vInfo.OriginRepoID, vInfo.Path, root, head.CreatorName, origHead.CommitID)
if err != nil {
err := fmt.Errorf("merge repo %.8s failed: failed to update origin repo%.10s path %s", repoID, vInfo.OriginRepoID, vInfo.Path)
return err
}
repomgr.SetVirtualRepoBaseCommitPath(repo.ID, newBaseCommit, vInfo.Path)
cleanupVirtualRepos(vInfo.OriginRepoID)
mergeVirtualRepo(vInfo.OriginRepoID, repoID)
} else {
// Both sides changed: 3-way merge, then update both heads.
roots := []string{baseRoot, origRoot, root}
opt := new(mergeOptions)
opt.remoteRepoID = repoID
opt.remoteHead = head.CommitID
err := mergeTrees(origRepo.StoreID, roots, opt)
if err != nil {
err := fmt.Errorf("failed to merge")
return err
}
_, err = updateDir(repoID, "/", opt.mergedRoot, origHead.CreatorName, head.CommitID)
if err != nil {
err := fmt.Errorf("failed to update root of virtual repo %.10s", repoID)
return err
}
newBaseCommit, err := updateDir(vInfo.OriginRepoID, vInfo.Path, opt.mergedRoot, head.CreatorName, origHead.CommitID)
if err != nil {
err := fmt.Errorf("merge repo %.10s failed: failed to update origin repo %.10s path %s", repoID, vInfo.OriginRepoID, vInfo.Path)
return err
}
repomgr.SetVirtualRepoBaseCommitPath(repo.ID, newBaseCommit, vInfo.Path)
cleanupVirtualRepos(vInfo.OriginRepoID)
mergeVirtualRepo(vInfo.OriginRepoID, repoID)
}
return nil
}
// cleanupVirtualRepos checks every virtual repo derived from repoID and,
// for any whose underlying directory no longer exists in the origin
// repo's head commit, tries to relocate it (the directory may have been
// renamed) or deletes the virtual repo (see handleMissingVirtualRepo).
func cleanupVirtualRepos(repoID string) error {
	repo := repomgr.Get(repoID)
	if repo == nil {
		err := fmt.Errorf("failed to get repo %.10s", repoID)
		return err
	}
	head, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
	if err != nil {
		err := fmt.Errorf("failed to load commit %s/%s : %v", repo.ID, repo.HeadCommitID, err)
		return err
	}
	vRepos, err := repomgr.GetVirtualRepoInfoByOrigin(repoID)
	if err != nil {
		err := fmt.Errorf("failed to get virtual repo ids by origin repo %.10s", repoID)
		return err
	}
	for _, vInfo := range vRepos {
		_, err := fsmgr.GetSeafdirByPath(repo.StoreID, head.RootID, vInfo.Path)
		if err != nil {
			// Use errors.Is so wrapped ErrPathNoExist values are also
			// recognized (consistent with mergeRepo).
			if errors.Is(err, fsmgr.ErrPathNoExist) {
				handleMissingVirtualRepo(repo, head, vInfo)
			}
		}
	}
	return nil
}
// handleMissingVirtualRepo is called when the directory a virtual repo is
// based on can no longer be found in the origin repo's head commit. It
// diffs the head commit against its parent to detect whether the
// directory (or an ancestor of it) was renamed. On a rename the virtual
// repo is re-pointed at the new path (and the library itself renamed when
// the base directory moved); otherwise the virtual repo is deleted.
// Returns the new path when a rename was found, "" otherwise.
func handleMissingVirtualRepo(repo *repomgr.Repo, head *commitmgr.Commit, vInfo *repomgr.VRepoInfo) (string, error) {
	parent, err := commitmgr.Load(head.RepoID, head.ParentID.String)
	if err != nil {
		err := fmt.Errorf("failed to load commit %s/%s : %v", head.RepoID, head.ParentID.String, err)
		return "", err
	}
	var results []*diff.DiffEntry
	err = diff.DiffCommits(parent, head, &results, true)
	if err != nil {
		err := fmt.Errorf("failed to diff commits")
		return "", err
	}
	parPath := vInfo.Path
	var isRenamed bool
	var subPath string
	var returnPath string
	// Walk upwards from vInfo.Path towards the root, looking for a
	// renamed ancestor directory among the diff results.
	for {
		var newPath string
		oldDirID, err := fsmgr.GetSeafdirIDByPath(repo.StoreID, parent.RootID, parPath)
		if err != nil || oldDirID == "" {
			// Use errors.Is so wrapped ErrPathNoExist values match too.
			if errors.Is(err, fsmgr.ErrPathNoExist) {
				repomgr.DelVirtualRepo(vInfo.RepoID, option.CloudMode)
			}
			err := fmt.Errorf("failed to find %s under commit %s in repo %s", parPath, parent.CommitID, repo.StoreID)
			return "", err
		}
		for _, de := range results {
			if de.Status == diff.DiffStatusDirRenamed {
				if de.Sha1 == oldDirID {
					// NOTE(review): subPath only keeps the immediate child;
					// if the rename is found more than one level up, deeper
					// components of the original path are dropped — verify
					// this is intended.
					if subPath != "" {
						newPath = filepath.Join("/", de.NewName, subPath)
					} else {
						newPath = filepath.Join("/", de.NewName)
					}
					repomgr.SetVirtualRepoBaseCommitPath(vInfo.RepoID, head.CommitID, newPath)
					returnPath = newPath
					if subPath == "" {
						// The renamed dir is the virtual repo's own base:
						// keep the library name in sync with it.
						newName := filepath.Base(newPath)
						err := editRepo(vInfo.RepoID, newName, "Changed library name", "")
						if err != nil {
							// Typo fix: was "falied".
							log.Warnf("failed to rename repo %s.\n", newName)
						}
					}
					isRenamed = true
					break
				}
			}
		}
		if isRenamed {
			break
		}
		slash := strings.LastIndex(parPath, "/")
		if slash <= 0 {
			break
		}
		subPath = filepath.Base(parPath)
		parPath = filepath.Dir(parPath)
	}
	// No rename found anywhere on the path: the directory is really gone.
	if !isRenamed {
		repomgr.DelVirtualRepo(vInfo.RepoID, option.CloudMode)
	}
	return returnPath, nil
}
// editRepo updates a repo's name and/or description by creating a new
// commit on top of the current head. A concurrent head update makes
// updateBranch (inside editRepoNeedRetry) fail, in which case the edit is
// retried up to 3 times with a short random backoff.
func editRepo(repoID, name, desc, user string) error {
	if name == "" && desc == "" {
		err := fmt.Errorf("at least one argument should be non-null")
		return err
	}
	// Bug fix: the previous for-loop called editRepoNeedRetry only once,
	// in its init clause, so the loop condition never changed and failed
	// attempts were never actually retried.
	for retryCnt := 0; ; retryCnt++ {
		retry, err := editRepoNeedRetry(repoID, name, desc, user)
		if err != nil {
			return fmt.Errorf("failed to edit repo: %v", err)
		}
		if !retry {
			return nil
		}
		if retryCnt >= 3 {
			return fmt.Errorf("stop edit repo %s after 3 retries", repoID)
		}
		// Random backoff (100ms - 1s) to reduce head-update contention.
		random := rand.Intn(10) + 1
		time.Sleep(time.Duration(random*100) * time.Millisecond)
	}
}
// editRepoNeedRetry performs one attempt at renaming/re-describing a
// repo. It returns (true, nil) when the branch update lost a race and the
// caller should retry, (false, nil) on success, and a non-nil error on a
// hard failure.
func editRepoNeedRetry(repoID, name, desc, user string) (bool, error) {
	repo := repomgr.Get(repoID)
	if repo == nil {
		err := fmt.Errorf("no such library")
		return false, err
	}
	// Keep the current value for whichever field the caller left empty.
	if name == "" {
		name = repo.Name
	}
	if desc == "" {
		desc = repo.Desc
	}
	parent, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
	if err != nil {
		err := fmt.Errorf("failed to get commit %s:%s", repo.ID, repo.HeadCommitID)
		return false, err
	}
	if user == "" {
		user = parent.CreatorName
	}
	commit := commitmgr.NewCommit(repoID, parent.CommitID, parent.RootID, user, "Changed library name or description")
	repomgr.RepoToCommit(repo, commit)
	commit.RepoName = name
	commit.RepoDesc = desc
	err = commitmgr.Save(commit)
	if err != nil {
		err := fmt.Errorf("failed to add commit: %v", err)
		return false, err
	}
	_, err = updateBranch(repoID, repo.StoreID, commit.CommitID, parent.CommitID, "", false, "")
	if err != nil {
		// Lost the head-update race; the caller should retry.
		return true, nil
	}
	repomgr.UpdateRepoInfo(repoID, commit.CommitID)
	// Bug fix: success previously returned (true, nil), which told the
	// caller to keep retrying a successful edit.
	return false, nil
}
================================================
FILE: fileserver/workerpool/workerpool.go
================================================
package workerpool
import (
"runtime/debug"
"github.com/dgraph-io/ristretto/z"
log "github.com/sirupsen/logrus"
)
// WorkPool is a fixed-size pool of goroutines that execute queued jobs.
type WorkPool struct {
// jobs is the buffered queue of pending jobs.
jobs chan Job
// jobCB is the callback every queued job runs.
jobCB JobCB
// closer coordinates shutdown of the worker goroutines.
closer *z.Closer
}
// Job is the job object of workpool.
type Job struct {
callback JobCB
args []interface{}
}
// JobCB is the signature of a job callback.
type JobCB func(args ...interface{}) error
// CreateWorkerPool starts n worker goroutines that run queued jobs with
// the given callback. The job queue is buffered with capacity 100.
func CreateWorkerPool(jobCB JobCB, n int) *WorkPool {
pool := new(WorkPool)
pool.jobCB = jobCB
pool.jobs = make(chan Job, 100)
pool.closer = z.NewCloser(n)
for i := 0; i < n; i++ {
go pool.run(pool.jobs)
}
return pool
}
// AddTask queues a job with the given arguments. It blocks when the job
// queue (capacity 100) is full.
func (pool *WorkPool) AddTask(args ...interface{}) {
job := Job{pool.jobCB, args}
pool.jobs <- job
}
// run is one worker's loop: execute queued jobs until the pool is shut
// down via the closer.
func (pool *WorkPool) run(jobs chan Job) {
// NOTE(review): if a job panics, the recover below only logs it and the
// goroutine then exits, permanently shrinking the pool by one worker —
// confirm this is intended.
defer func() {
if err := recover(); err != nil {
log.Errorf("panic: %v\n%s", err, debug.Stack())
}
}()
defer pool.closer.Done()
for {
select {
case job := <-pool.jobs:
if job.callback != nil {
err := job.callback(job.args...)
if err != nil {
log.Errorf("failed to call jobs: %v.\n", err)
}
}
case <-pool.closer.HasBeenClosed():
return
}
}
}
// Shutdown signals all workers to stop and waits for them to finish.
func (pool *WorkPool) Shutdown() {
pool.closer.SignalAndWait()
}
================================================
FILE: fuse/Makefile.am
================================================
AM_CFLAGS = -DPKGDATADIR=\"$(pkgdatadir)\" \
-DPACKAGE_DATA_DIR=\""$(pkgdatadir)"\" \
-DSEAFILE_SERVER \
-I$(top_srcdir)/include \
-I$(top_srcdir)/lib \
-I$(top_builddir)/lib \
-I$(top_srcdir)/common \
@SEARPC_CFLAGS@ \
@GLIB2_CFLAGS@ \
@FUSE_CFLAGS@ \
@MYSQL_CFLAGS@ \
-Wall
bin_PROGRAMS = seaf-fuse
noinst_HEADERS = seaf-fuse.h seafile-session.h repo-mgr.h
seaf_fuse_SOURCES = seaf-fuse.c \
seafile-session.c \
file.c \
getattr.c \
readdir.c \
repo-mgr.c \
../common/block-mgr.c \
../common/user-mgr.c \
../common/group-mgr.c \
../common/org-mgr.c \
../common/block-backend.c \
../common/block-backend-fs.c \
../common/branch-mgr.c \
../common/commit-mgr.c \
../common/fs-mgr.c \
../common/log.c \
../common/seaf-db.c \
../common/seaf-utils.c \
../common/obj-store.c \
../common/obj-backend-fs.c \
../common/obj-backend-riak.c \
../common/seafile-crypt.c \
../common/password-hash.c
seaf_fuse_LDADD = @GLIB2_LIBS@ @GOBJECT_LIBS@ @SSL_LIBS@ @LIB_RT@ @LIB_UUID@ \
-lsqlite3 @LIBEVENT_LIBS@ \
$(top_builddir)/common/cdc/libcdc.la \
@SEARPC_LIBS@ @JANSSON_LIBS@ @FUSE_LIBS@ @ZLIB_LIBS@ \
@MYSQL_LIBS@ -lsqlite3 @ARGON2_LIBS@
================================================
FILE: fuse/file.c
================================================
#include "common.h"
#define FUSE_USE_VERSION 26
#include
#include
#include
#include
#include "log.h"
#include "utils.h"
#include "seaf-fuse.h"
/*
 * Read up to @size bytes of @file starting at @offset into @buf.
 *
 * Seafile stores a file as a sequence of blocks; this first skips whole
 * blocks that lie entirely before @offset, then reads block by block
 * until @size bytes are copied or the file ends.
 *
 * Returns the number of bytes read, 0 when @offset is at or beyond EOF,
 * or a negative errno on failure.
 */
int read_file(SeafileSession *seaf,
              const char *store_id, int version,
              Seafile *file,
              char *buf, size_t size,
              off_t offset, struct fuse_file_info *info)
{
    BlockHandle *handle = NULL;
    BlockMetadata *bmd;
    char *blkid;
    char *ptr;
    off_t off = 0, nleft;
    int i, n, ret = -EIO;

    /* Locate the first block that contains @offset. */
    for (i = 0; i < file->n_blocks; i++) {
        blkid = file->blk_sha1s[i];
        bmd = seaf_block_manager_stat_block(seaf->block_mgr, store_id, version, blkid);
        if (!bmd)
            return -EIO;
        if (offset < off + bmd->size) {
            g_free (bmd);
            break;
        }
        off += bmd->size;
        g_free (bmd);
    }

    /* beyond the file size */
    if (i == file->n_blocks)
        return 0;

    nleft = size;
    ptr = buf;
    while (nleft > 0 && i < file->n_blocks) {
        blkid = file->blk_sha1s[i];
        handle = seaf_block_manager_open_block(seaf->block_mgr,
                                               store_id, version,
                                               blkid, BLOCK_READ);
        if (!handle) {
            seaf_warning ("Failed to open block %s:%s.\n", store_id, blkid);
            return -EIO;
        }

        /* trim the offset in a block */
        if (offset > off) {
            char *tmp = (char *)malloc(sizeof(char) * (offset - off));
            if (!tmp) {
                /* Bug fix: don't leak the open block handle on OOM. */
                ret = -ENOMEM;
                goto out;
            }

            n = seaf_block_manager_read_block(seaf->block_mgr, handle,
                                              tmp, offset-off);
            if (n != offset - off) {
                seaf_warning ("Failed to read block %s:%s.\n", store_id, blkid);
                free (tmp);
                goto out;
            }
            off += n;
            free(tmp);
        }

        if ((n = seaf_block_manager_read_block(seaf->block_mgr,
                                               handle, ptr, nleft)) < 0) {
            seaf_warning ("Failed to read block %s:%s.\n", store_id, blkid);
            goto out;
        }

        nleft -= n;
        ptr += n;
        off += n;
        ++i;

        /* At this point we should have read all the content of the block or
         * have read up to @size bytes. So it's safe to close the block.
         */
        seaf_block_manager_close_block(seaf->block_mgr, handle);
        seaf_block_manager_block_handle_free (seaf->block_mgr, handle);
    }

    return size - nleft;

out:
    if (handle) {
        seaf_block_manager_close_block(seaf->block_mgr, handle);
        seaf_block_manager_block_handle_free (seaf->block_mgr, handle);
    }
    return ret;
}
================================================
FILE: fuse/getattr.c
================================================
#include "common.h"
#define FUSE_USE_VERSION 26
#include
#include
#include
#include
#include "log.h"
#include "utils.h"
#include "seaf-fuse.h"
#include "seafile-session.h"
#include "seaf-utils.h"
/* RPC helper: fetch one email user from ccnet.
 * NOTE(review): appears unused in this file — getattr_user calls
 * ccnet_user_manager_get_emailuser directly; candidate for removal. */
static CcnetEmailUser *get_user_from_ccnet (SearpcClient *client, const char *user)
{
return (CcnetEmailUser *)searpc_client_call__object (client,
"get_emailuser", CCNET_TYPE_EMAIL_USER, NULL,
1, "string", user);
}
/* Fill @stbuf for the mount root: a plain virtual directory. Always
 * succeeds. */
static int getattr_root(SeafileSession *seaf, struct stat *stbuf)
{
    stbuf->st_nlink = 2;               /* '.' and '..' */
    stbuf->st_size = 4096;             /* nominal directory size */
    stbuf->st_mode = S_IFDIR | 0755;
    return 0;
}
/* Fill @stbuf for a /<user> directory. Returns 0, or -ENOENT when the
 * user is unknown to ccnet. */
static int getattr_user(SeafileSession *seaf, const char *user, struct stat *stbuf)
{
CcnetEmailUser *emailuser;
emailuser = ccnet_user_manager_get_emailuser (seaf->user_mgr, user, NULL);
if (!emailuser) {
return -ENOENT;
}
/* Only existence matters here; drop the reference immediately. */
g_object_unref (emailuser);
stbuf->st_mode = S_IFDIR | 0755;
stbuf->st_nlink = 2;
stbuf->st_size = 4096;
return 0;
}
/* Fill @stbuf for @repo_path inside repo @repo_id, resolved at the head
 * commit of the repo's current branch. Directories get a size derived
 * from their entry count; regular files get their stored size. mtime is
 * filled from the dirent for version > 0 repos. Returns 0 or -ENOENT. */
static int getattr_repo(SeafileSession *seaf,
                        const char *user, const char *repo_id, const char *repo_path,
                        struct stat *stbuf)
{
    SeafRepo *repo = NULL;
    SeafBranch *branch;
    SeafCommit *commit = NULL;
    guint32 mode = 0;
    char *id = NULL;
    int ret = 0;

    repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);
    if (!repo) {
        seaf_warning ("Failed to get repo %s.\n", repo_id);
        ret = -ENOENT;
        goto out;
    }

    branch = repo->head;
    commit = seaf_commit_manager_get_commit(seaf->commit_mgr,
                                            repo->id, repo->version,
                                            branch->commit_id);
    if (!commit) {
        seaf_warning ("Failed to get commit %s:%.8s.\n", repo->id, branch->commit_id);
        ret = -ENOENT;
        goto out;
    }

    id = seaf_fs_manager_path_to_obj_id(seaf->fs_mgr,
                                        repo->store_id, repo->version,
                                        commit->root_id,
                                        repo_path, &mode, NULL);
    if (!id) {
        seaf_warning ("Path %s doesn't exist in repo %s.\n", repo_path, repo_id);
        ret = -ENOENT;
        goto out;
    }

    if (S_ISDIR(mode)) {
        SeafDir *dir;
        GList *l;
        int cnt = 2; /* '.' and '..' */

        dir = seaf_fs_manager_get_seafdir(seaf->fs_mgr,
                                          repo->store_id, repo->version, id);
        if (dir) {
            for (l = dir->entries; l; l = l->next)
                cnt++;
        }

        if (strcmp (repo_path, "/") != 0) {
            // get dirent of the dir
            SeafDirent *dirent = seaf_fs_manager_get_dirent_by_path (seaf->fs_mgr,
                                                                     repo->store_id,
                                                                     repo->version,
                                                                     commit->root_id,
                                                                     repo_path, NULL);
            if (dirent && repo->version != 0)
                stbuf->st_mtime = dirent->mtime;
            seaf_dirent_free (dirent);
        }

        stbuf->st_size += cnt * sizeof(SeafDirent);
        stbuf->st_mode = mode | 0755;
        stbuf->st_nlink = 2;
        seaf_dir_free (dir);
    } else if (S_ISREG(mode)) {
        Seafile *file;

        file = seaf_fs_manager_get_seafile(seaf->fs_mgr,
                                           repo->store_id, repo->version, id);
        if (file)
            stbuf->st_size = file->file_size;

        SeafDirent *dirent = seaf_fs_manager_get_dirent_by_path (seaf->fs_mgr,
                                                                 repo->store_id,
                                                                 repo->version,
                                                                 commit->root_id,
                                                                 repo_path, NULL);
        if (dirent && repo->version != 0)
            stbuf->st_mtime = dirent->mtime;

        stbuf->st_mode = mode | 0644;
        stbuf->st_nlink = 1;

        seaf_dirent_free (dirent);
        seafile_unref (file);
    } else {
        /* Bug fix: this branch used to 'return -ENOENT' directly, leaking
         * @id, @repo and @commit. Route through the cleanup path. */
        ret = -ENOENT;
        goto out;
    }

out:
    g_free (id);
    seaf_repo_unref (repo);
    seaf_commit_unref (commit);
    return ret;
}
/* FUSE getattr entry point. The mount layout is
 * /<user>/<repo_id>_<repo_name>/<path...>; dispatch on how many of those
 * components the parsed path contains. */
int do_getattr(SeafileSession *seaf, const char *path, struct stat *stbuf)
{
int n_parts;
char *user, *repo_id, *repo_path;
int ret = 0;
if (parse_fuse_path (path, &n_parts, &user, &repo_id, &repo_path) < 0) {
return -ENOENT;
}
switch (n_parts) {
case 0:
ret = getattr_root(seaf, stbuf);
break;
case 1:
ret = getattr_user(seaf, user, stbuf);
break;
case 2:
case 3:
ret = getattr_repo(seaf, user, repo_id, repo_path, stbuf);
break;
}
/* parse_fuse_path allocates all three strings; free them here. */
g_free (user);
g_free (repo_id);
g_free (repo_path);
return ret;
}
================================================
FILE: fuse/readdir.c
================================================
#include "common.h"
#define FUSE_USE_VERSION 26
#include
#include
#include
#include
#include "log.h"
#include "utils.h"
#include "seaf-fuse.h"
#include "seafile-session.h"
#include "seaf-utils.h"
/* Return a newly allocated copy of @repo_name with every '/' replaced by
 * '_', so repo names remain single path components. Caller frees. */
static char *replace_slash (const char *repo_name)
{
    char *copy = g_strdup (repo_name);
    char *q;

    for (q = copy; *q != '\0'; q++) {
        if (*q == '/')
            *q = '_';
    }
    return copy;
}
/* RPC helper: list email users from ccnet; @source selects the backend.
 * NOTE(review): appears unused — readdir_root calls
 * ccnet_user_manager_get_emailusers directly; candidate for removal. */
static GList *get_users_from_ccnet (SearpcClient *client, const char *source)
{
return searpc_client_call__objlist (client,
"get_emailusers", CCNET_TYPE_EMAIL_USER, NULL,
3, "string", source, "int", -1, "int", -1);
}
/* RPC helper: fetch one email user from ccnet.
 * NOTE(review): also appears unused in this file. */
static CcnetEmailUser *get_user_from_ccnet (SearpcClient *client, const char *user)
{
return (CcnetEmailUser *)searpc_client_call__object (client,
"get_emailuser", CCNET_TYPE_EMAIL_USER, NULL,
1, "string", user);
}
/* List the mount root: one entry per known user, merged from the "DB"
 * and "LDAPImport" backends and de-duplicated via a hash table, minus
 * users present in seaf->excluded_users. */
static int readdir_root(SeafileSession *seaf,
void *buf, fuse_fill_dir_t filler, off_t offset,
struct fuse_file_info *info)
{
GList *users, *p;
CcnetEmailUser *user;
const char *email;
GHashTable *user_hash;
int dummy;
/* Keys are duplicated and owned by the table; values are unused. */
user_hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);
users = ccnet_user_manager_get_emailusers (seaf->user_mgr, "DB", -1, -1, NULL);
for (p = users; p; p = p->next) {
user = p->data;
email = ccnet_email_user_get_email (user);
g_hash_table_insert (user_hash, g_strdup(email), &dummy);
g_object_unref (user);
}
g_list_free (users);
users = ccnet_user_manager_get_emailusers (seaf->user_mgr, "LDAPImport", -1, -1, NULL);
for (p = users; p; p = p->next) {
user = p->data;
email = ccnet_email_user_get_email (user);
g_hash_table_insert (user_hash, g_strdup(email), &dummy);
g_object_unref (user);
}
g_list_free (users);
/* The keys list borrows the table's strings; only the list is freed. */
users = g_hash_table_get_keys (user_hash);
for (p = users; p; p = p->next) {
email = p->data;
char *exclude = g_hash_table_lookup (seaf->excluded_users, email);
if (exclude)
continue;
filler (buf, email, NULL, 0);
}
g_list_free (users);
g_hash_table_destroy (user_hash);
return 0;
}
/* List /<user>: one "<repo_id>_<repo_name>" entry per repo owned by
 * @user, skipping virtual and encrypted repos. '/' in repo names is
 * replaced so each entry stays a single path component. */
static int readdir_user(SeafileSession *seaf, const char *user,
                        void *buf, fuse_fill_dir_t filler, off_t offset,
                        struct fuse_file_info *info)
{
    CcnetEmailUser *emailuser;
    GList *list = NULL, *p;
    GString *name;

    emailuser = ccnet_user_manager_get_emailuser (seaf->user_mgr, user, NULL);
    if (!emailuser) {
        return -ENOENT;
    }
    g_object_unref (emailuser);

    list = seaf_repo_manager_get_repos_by_owner (seaf->repo_mgr, user);
    if (!list) {
        return 0;
    }

    for (p = list; p; p = p->next) {
        SeafRepo *repo = (SeafRepo *)p->data;

        /* Don't list virtual repos. */
        if (seaf_repo_manager_is_virtual_repo(seaf->repo_mgr, repo->id)) {
            seaf_repo_unref (repo);
            continue;
        }

        // Don't list encrypted repo
        if (repo->encrypted) {
            /* Bug fix: this branch used to leak the repo reference. */
            seaf_repo_unref (repo);
            continue;
        }

        char *clean_repo_name = replace_slash (repo->name);

        name = g_string_new ("");
        g_string_printf (name, "%s_%s", repo->id, clean_repo_name);
        filler(buf, name->str, NULL, 0);

        g_string_free (name, TRUE);
        g_free (clean_repo_name);
        seaf_repo_unref (repo);
    }

    g_list_free (list);
    return 0;
}
/* List the contents of @repo_path inside repo @repo_id, resolved at the
 * head commit of the repo's current branch. Returns 0, or -ENOENT when
 * the repo, head commit or path cannot be resolved. */
static int readdir_repo(SeafileSession *seaf,
const char *user, const char *repo_id, const char *repo_path,
void *buf, fuse_fill_dir_t filler, off_t offset,
struct fuse_file_info *info)
{
SeafRepo *repo = NULL;
SeafBranch *branch;
SeafCommit *commit = NULL;
SeafDir *dir = NULL;
GList *l;
int ret = 0;
repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);
if (!repo) {
seaf_warning ("Failed to get repo %s.\n", repo_id);
ret = -ENOENT;
goto out;
}
branch = repo->head;
commit = seaf_commit_manager_get_commit(seaf->commit_mgr,
repo->id, repo->version,
branch->commit_id);
if (!commit) {
seaf_warning ("Failed to get commit %s:%.8s.\n", repo->id, branch->commit_id);
ret = -ENOENT;
goto out;
}
dir = seaf_fs_manager_get_seafdir_by_path(seaf->fs_mgr,
repo->store_id, repo->version,
commit->root_id,
repo_path, NULL);
if (!dir) {
seaf_warning ("Path %s doesn't exist in repo %s.\n", repo_path, repo_id);
ret = -ENOENT;
goto out;
}
for (l = dir->entries; l; l = l->next) {
SeafDirent *seaf_dent = (SeafDirent *) l->data;
/* FIXME: maybe we need to return stbuf */
filler(buf, seaf_dent->name, NULL, 0);
}
out:
/* All three cleanup calls are NULL-tolerant on these paths. */
seaf_repo_unref (repo);
seaf_commit_unref (commit);
seaf_dir_free (dir);
return ret;
}
/* FUSE readdir entry point. Dispatches on the number of path components:
 * root lists users, /<user> lists repos, deeper paths list repo dirs. */
int do_readdir(SeafileSession *seaf, const char *path, void *buf,
fuse_fill_dir_t filler, off_t offset,
struct fuse_file_info *info)
{
int n_parts;
char *user, *repo_id, *repo_path;
int ret = 0;
if (parse_fuse_path (path, &n_parts, &user, &repo_id, &repo_path) < 0) {
return -ENOENT;
}
switch (n_parts) {
case 0:
ret = readdir_root(seaf, buf, filler, offset, info);
break;
case 1:
ret = readdir_user(seaf, user, buf, filler, offset, info);
break;
case 2:
case 3:
ret = readdir_repo(seaf, user, repo_id, repo_path, buf, filler, offset, info);
break;
}
/* parse_fuse_path allocates all three strings; free them here. */
g_free (user);
g_free (repo_id);
g_free (repo_path);
return ret;
}
================================================
FILE: fuse/repo-mgr.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#include "common.h"
#include
#include "utils.h"
#include "log.h"
#include "seafile-session.h"
#include "commit-mgr.h"
#include "branch-mgr.h"
#include "repo-mgr.h"
#include "fs-mgr.h"
#include "seafile-error.h"
#include "seaf-db.h"
#define INDEX_DIR "index"
struct _SeafRepoManagerPriv {
};
static SeafRepo *
load_repo (SeafRepoManager *manager, const char *repo_id);
gboolean
is_repo_id_valid (const char *id)
{
if (!id)
return FALSE;
return is_uuid_valid (id);
}
/* Allocate a SeafRepo with the given 36-char id, name and description.
 * Returns a new reference (ref_cnt = 1), or NULL on an invalid id. */
SeafRepo*
seaf_repo_new (const char *id, const char *name, const char *desc)
{
    SeafRepo* repo;

    /* valid check: the old code announced this but never did it; a short
     * @id would make the fixed 36-byte memcpy below read out of bounds. */
    if (!id || strlen (id) != 36)
        return NULL;

    repo = g_new0 (SeafRepo, 1);
    memcpy (repo->id, id, 36);
    repo->id[36] = '\0';

    repo->name = g_strdup(name);
    repo->desc = g_strdup(desc);

    repo->ref_cnt = 1;

    return repo;
}
/* Free a repo and its owned strings and head branch reference. Called
 * only from seaf_repo_unref when the refcount drops to zero. */
void
seaf_repo_free (SeafRepo *repo)
{
    /* g_free() ignores NULL, so the old per-field guards were redundant. */
    g_free (repo->name);
    g_free (repo->desc);
    g_free (repo->category);
    if (repo->head) seaf_branch_unref (repo->head);
    g_free (repo);
}
/* Atomically take an additional reference on @repo. */
void
seaf_repo_ref (SeafRepo *repo)
{
g_atomic_int_inc (&repo->ref_cnt);
}
/* Atomically drop a reference; frees the repo when the count reaches 0.
 * Safe to call with NULL. */
void
seaf_repo_unref (SeafRepo *repo)
{
if (!repo)
return;
if (g_atomic_int_dec_and_test (&repo->ref_cnt))
seaf_repo_free (repo);
}
/* Make @branch the repo's head, taking a reference on it and dropping
 * the reference on the previous head. */
static void
set_head_common (SeafRepo *repo, SeafBranch *branch)
{
    /* Ref the new branch before unrefing the old one, so the call is
     * safe even when @branch is already repo->head (the old order could
     * free the branch first and then use it). */
    seaf_branch_ref (branch);
    if (repo->head)
        seaf_branch_unref (repo->head);
    repo->head = branch;
}
/* Populate @repo's metadata from @commit (name, desc, encryption flag,
 * history flag, version). Overwrites name/desc without freeing previous
 * values, so only call this on a freshly created repo. */
void
seaf_repo_from_commit (SeafRepo *repo, SeafCommit *commit)
{
repo->name = g_strdup (commit->repo_name);
repo->desc = g_strdup (commit->repo_desc);
repo->encrypted = commit->encrypted;
repo->no_local_history = commit->no_local_history;
repo->version = commit->version;
}
/* Copy @repo's metadata into @commit — the inverse of
 * seaf_repo_from_commit. The strings are duplicated. */
void
seaf_repo_to_commit (SeafRepo *repo, SeafCommit *commit)
{
commit->repo_name = g_strdup (repo->name);
commit->repo_desc = g_strdup (repo->desc);
commit->encrypted = repo->encrypted;
commit->no_local_history = repo->no_local_history;
commit->version = repo->version;
}
/* Traverse callback for seaf_commit_manager_traverse_commit_tree:
 * prepend each visited commit to the list pointed to by @vlist. */
static gboolean
collect_commit (SeafCommit *commit, void *vlist, gboolean *stop)
{
GList **commits = vlist;
/* The traverse function will unref the commit, so we need to ref it.
 */
seaf_commit_ref (commit);
*commits = g_list_prepend (*commits, commit);
return TRUE;
}
/* Collect all commits reachable from every branch of @repo, oldest
 * first. Each list element holds a commit reference the caller must
 * unref; the caller also frees the list. Returns NULL on error. */
GList *
seaf_repo_get_commits (SeafRepo *repo)
{
    GList *branches;
    GList *ptr;
    SeafBranch *branch;
    GList *commits = NULL;

    branches = seaf_branch_manager_get_branch_list (seaf->branch_mgr, repo->id);
    if (branches == NULL) {
        seaf_warning ("Failed to get branch list of repo %s.\n", repo->id);
        return NULL;
    }

    for (ptr = branches; ptr != NULL; ptr = ptr->next) {
        branch = ptr->data;
        gboolean res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr,
                                                                 repo->id,
                                                                 repo->version,
                                                                 branch->commit_id,
                                                                 collect_commit,
                                                                 &commits,
                                                                 FALSE);
        if (!res) {
            for (ptr = commits; ptr != NULL; ptr = ptr->next)
                seaf_commit_unref ((SeafCommit *)(ptr->data));
            g_list_free (commits);
            /* Bug fix: the freed list used to be returned through the
             * 'out' path, handing the caller a dangling pointer. */
            commits = NULL;
            goto out;
        }
    }

    commits = g_list_reverse (commits);

out:
    for (ptr = branches; ptr != NULL; ptr = ptr->next) {
        seaf_branch_unref ((SeafBranch *)ptr->data);
    }
    return commits;
}
#if 0
static int
compare_repo (const SeafRepo *srepo, const SeafRepo *trepo)
{
return g_strcmp0 (srepo->id, trepo->id);
}
#endif
/* Allocate a repo manager bound to @seaf. Both the manager and its priv
 * struct are zero-initialized. */
SeafRepoManager*
seaf_repo_manager_new (SeafileSession *seaf)
{
    SeafRepoManager *mgr;

    mgr = g_new0 (SeafRepoManager, 1);
    mgr->seaf = seaf;
    mgr->priv = g_new0 (SeafRepoManagerPriv, 1);
    return mgr;
}
/* No initialization is needed for the FUSE repo manager; kept so the
 * interface matches the common SeafRepoManager API. Always returns 0. */
int
seaf_repo_manager_init (SeafRepoManager *mgr)
{
return 0;
}
/* No background tasks to start; see seaf_repo_manager_init. */
int
seaf_repo_manager_start (SeafRepoManager *mgr)
{
return 0;
}
/* Return TRUE when a row for @id exists in the Repo table.
 * NOTE(review): @id is spliced into the SQL string directly; callers only
 * length-check it, so this relies on ids never containing quote
 * characters — consider a parameterized statement. */
static gboolean
repo_exists_in_db (SeafDB *db, const char *id)
{
char sql[256];
gboolean db_err = FALSE;
snprintf (sql, sizeof(sql), "SELECT repo_id FROM Repo WHERE repo_id = '%s'",
id);
return seaf_db_check_for_existence (db, sql, &db_err);
}
/* Look up a repo by id: verify it exists in the database and load it
 * (branch, head commit, store id). Returns a new reference the caller
 * must unref, or NULL when the id is over-long, unknown, or the repo
 * fails to load. */
SeafRepo*
seaf_repo_manager_get_repo (SeafRepoManager *manager, const gchar *id)
{
    /* Cleanup: removed a dead local 'SeafRepo repo' and the unchecked
     * memcpy into it — neither was ever used. */
    if (strlen (id) >= 37)
        return NULL;

    if (!repo_exists_in_db (manager->seaf->db, id))
        return NULL;

    return load_repo (manager, id);
}
/* Return TRUE when repo @id exists in the database. */
gboolean
seaf_repo_manager_repo_exists (SeafRepoManager *manager, const gchar *id)
{
    /* Cleanup: removed a dead local 'SeafRepo repo' whose unconditional
     * 37-byte memcpy from @id could read past the end of a short string. */
    return repo_exists_in_db (manager->seaf->db, id);
}
/* Set @repo's head to @branch and populate its metadata from the
 * branch's head commit. Marks the repo corrupted when the commit is
 * missing from the commit store. */
static void
load_repo_commit (SeafRepoManager *manager,
SeafRepo *repo,
SeafBranch *branch)
{
SeafCommit *commit;
commit = seaf_commit_manager_get_commit_compatible (manager->seaf->commit_mgr,
repo->id,
branch->commit_id);
if (!commit) {
seaf_warning ("Commit %s is missing\n", branch->commit_id);
repo->is_corrupted = TRUE;
return;
}
set_head_common (repo, branch);
seaf_repo_from_commit (repo, commit);
seaf_commit_unref (commit);
}
/* Row callback: copy the origin_repo column (36-char id + NUL) into the
 * caller-supplied buffer. Returns FALSE so only the first row is used. */
static gboolean
load_virtual_info (SeafDBRow *row, void *vrepo_id)
{
char *ret_repo_id = vrepo_id;
const char *origin_repo_id;
origin_repo_id = seaf_db_row_get_column_text (row, 0);
memcpy (ret_repo_id, origin_repo_id, 37);
return FALSE;
}
/* If @repo_id is a virtual repo, return a newly allocated string holding
 * its origin repo's id; otherwise NULL. The caller frees the result.
 * NOTE(review): @repo_id is spliced into the SQL directly — same
 * injection caveat as repo_exists_in_db. */
char *
get_origin_repo_id (SeafRepoManager *mgr, const char *repo_id)
{
char sql[256];
char origin_repo_id[37];
memset (origin_repo_id, 0, 37);
snprintf (sql, 256,
"SELECT origin_repo FROM VirtualRepo "
"WHERE repo_id = '%s'", repo_id);
seaf_db_foreach_selected_row (seaf->db, sql, load_virtual_info, origin_repo_id);
/* The buffer stays all-zero when no row matched. */
if (origin_repo_id[0] != 0)
return g_strdup(origin_repo_id);
else
return NULL;
}
/* Load repo @repo_id: resolve its "master" branch, fill metadata from
 * the head commit, and set store_id (the origin repo's id for virtual
 * repos, the repo's own id otherwise). Returns NULL when the repo is
 * corrupted (missing branch or commit). */
static SeafRepo *
load_repo (SeafRepoManager *manager, const char *repo_id)
{
SeafRepo *repo;
SeafBranch *branch;
repo = seaf_repo_new(repo_id, NULL, NULL);
if (!repo) {
seaf_warning ("[repo mgr] failed to alloc repo.\n");
return NULL;
}
repo->manager = manager;
branch = seaf_branch_manager_get_branch (seaf->branch_mgr, repo_id, "master");
if (!branch) {
seaf_warning ("Failed to get master branch of repo %.8s.\n", repo_id);
repo->is_corrupted = TRUE;
} else {
load_repo_commit (manager, repo, branch);
seaf_branch_unref (branch);
}
if (repo->is_corrupted) {
seaf_warning ("Repo %.8s is corrupted.\n", repo->id);
seaf_repo_free (repo);
return NULL;
}
/* Virtual repos share the origin repo's fs/block store. */
char *origin_repo_id = get_origin_repo_id (manager, repo->id);
if (origin_repo_id)
memcpy (repo->store_id, origin_repo_id, 36);
else
memcpy (repo->store_id, repo->id, 36);
g_free (origin_repo_id);
return repo;
}
/* Row callback: append (prepend) a copy of the repo_id column to the
 * GList pointed to by @data. Returns TRUE to keep iterating. */
static gboolean
collect_repo_id (SeafDBRow *row, void *data)
{
    GList **id_list = data;
    const char *id = seaf_db_row_get_column_text (row, 0);

    *id_list = g_list_prepend (*id_list, g_strdup (id));
    return TRUE;
}
/* Return a list of newly allocated repo-id strings for every row in the
 * Repo table, or NULL on a database error. Caller frees strings + list. */
GList *
seaf_repo_manager_get_repo_id_list (SeafRepoManager *mgr)
{
GList *ret = NULL;
char sql[256];
snprintf (sql, 256, "SELECT repo_id FROM Repo");
if (seaf_db_foreach_selected_row (mgr->seaf->db, sql,
collect_repo_id, &ret) < 0)
return NULL;
return ret;
}
/* Return up to @limit loaded repos starting at offset @start (-1/-1
 * means all). Ids that fail to load are silently skipped. Each element
 * is a repo reference the caller must unref. */
GList *
seaf_repo_manager_get_repo_list (SeafRepoManager *mgr, int start, int limit)
{
GList *id_list = NULL, *ptr;
GList *ret = NULL;
SeafRepo *repo;
char sql[256];
if (start == -1 && limit == -1)
snprintf (sql, 256, "SELECT repo_id FROM Repo");
else
snprintf (sql, 256, "SELECT repo_id FROM Repo LIMIT %d, %d", start, limit);
if (seaf_db_foreach_selected_row (mgr->seaf->db, sql,
collect_repo_id, &id_list) < 0)
return NULL;
for (ptr = id_list; ptr; ptr = ptr->next) {
char *repo_id = ptr->data;
repo = seaf_repo_manager_get_repo (mgr, repo_id);
if (repo != NULL)
ret = g_list_prepend (ret, repo);
}
string_list_free (id_list);
/* Prepending reversed the order; restore database order. */
return g_list_reverse (ret);
}
/* Return the loaded repos owned by @email (RepoOwner table). Ids that
 * fail to load are skipped; each element is a reference the caller must
 * unref. NOTE(review): @email is spliced into the SQL directly — same
 * injection caveat as repo_exists_in_db. */
GList *
seaf_repo_manager_get_repos_by_owner (SeafRepoManager *mgr,
const char *email)
{
GList *id_list = NULL, *ptr;
GList *ret = NULL;
char sql[256];
snprintf (sql, 256, "SELECT repo_id FROM RepoOwner WHERE owner_id='%s'",
email);
if (seaf_db_foreach_selected_row (mgr->seaf->db, sql,
collect_repo_id, &id_list) < 0)
return NULL;
for (ptr = id_list; ptr; ptr = ptr->next) {
char *repo_id = ptr->data;
SeafRepo *repo = seaf_repo_manager_get_repo (mgr, repo_id);
if (repo != NULL)
ret = g_list_prepend (ret, repo);
}
string_list_free (id_list);
return ret;
}
/* Return TRUE when @repo_id has a row in the VirtualRepo table. */
gboolean
seaf_repo_manager_is_virtual_repo (SeafRepoManager *mgr, const char *repo_id)
{
    char sql[256];
    /* Fix: db_err was passed to the query uninitialized; initialize it
     * like repo_exists_in_db does. */
    gboolean db_err = FALSE;

    snprintf (sql, 256,
              "SELECT 1 FROM VirtualRepo WHERE repo_id = '%s'", repo_id);
    return seaf_db_check_for_existence (seaf->db, sql, &db_err);
}
================================================
FILE: fuse/repo-mgr.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef SEAF_REPO_MGR_H
#define SEAF_REPO_MGR_H
#include
#include "seafile-object.h"
#include "commit-mgr.h"
#include "branch-mgr.h"
struct _SeafRepoManager;
typedef struct _SeafRepo SeafRepo;
/* In-memory representation of a library (repo), populated from the head
 * commit of its "master" branch (see fuse/repo-mgr.c). */
struct _SeafRepo {
struct _SeafRepoManager *manager;
gchar id[37];
gchar *name;
gchar *desc;
gchar *category; /* not used yet */
gboolean encrypted;
int enc_version;
gchar magic[33]; /* hash(repo_id + passwd), key stretched. */
gboolean no_local_history;
SeafBranch *head;
gboolean is_corrupted;
gboolean delete_pending;
int ref_cnt; /* manipulated atomically; see seaf_repo_ref/unref */
int version;
/* Used to access fs and block store.
 * This id is different from repo_id when this repo is virtual.
 * Virtual repos share fs and block store with its origin repo.
 * However, commit store for each repo is always independent.
 * So always use repo_id to access commit store.
 */
gchar store_id[37];
};
gboolean is_repo_id_valid (const char *id);
/* ---- Repo object lifecycle ---- */
SeafRepo*
seaf_repo_new (const char *id, const char *name, const char *desc);
void
seaf_repo_free (SeafRepo *repo);
void
seaf_repo_ref (SeafRepo *repo);
void
seaf_repo_unref (SeafRepo *repo);
typedef struct _SeafRepoManager SeafRepoManager;
typedef struct _SeafRepoManagerPriv SeafRepoManagerPriv;
/* Manager over all repos; private state lives in priv. */
struct _SeafRepoManager {
struct _SeafileSession *seaf;
SeafRepoManagerPriv *priv;
};
/* ---- Manager lifecycle ---- */
SeafRepoManager*
seaf_repo_manager_new (struct _SeafileSession *seaf);
int
seaf_repo_manager_init (SeafRepoManager *mgr);
int
seaf_repo_manager_start (SeafRepoManager *mgr);
/* ---- Repo registration and lookup ---- */
int
seaf_repo_manager_add_repo (SeafRepoManager *mgr, SeafRepo *repo);
int
seaf_repo_manager_del_repo (SeafRepoManager *mgr, SeafRepo *repo);
SeafRepo*
seaf_repo_manager_get_repo (SeafRepoManager *manager, const gchar *id);
gboolean
seaf_repo_manager_repo_exists (SeafRepoManager *manager, const gchar *id);
/* ---- Listing queries ---- */
GList*
seaf_repo_manager_get_repo_list (SeafRepoManager *mgr, int start, int limit);
GList *
seaf_repo_manager_get_repo_id_list (SeafRepoManager *mgr);
GList *
seaf_repo_manager_get_repos_by_owner (SeafRepoManager *mgr,
const char *email);
gboolean
seaf_repo_manager_is_virtual_repo (SeafRepoManager *mgr, const char *repo_id);
#endif
================================================
FILE: fuse/seaf-fuse.c
================================================
#include "common.h"
#include
#include
#define FUSE_USE_VERSION 26
#include
#include
#include
#include
#include
#include "log.h"
#include "utils.h"
#include "seaf-fuse.h"
SeafileSession *seaf = NULL;
/* Extract the 36-character repo UUID prefix from a "<repo-id>_<name>"
 * directory entry.  Returns a newly allocated string (caller frees with
 * g_free), or NULL when the entry is too short to contain a UUID. */
static char *parse_repo_id (const char *repo_id_name)
{
    return (strlen (repo_id_name) >= 36) ? g_strndup (repo_id_name, 36)
                                         : NULL;
}
/*
 * A mounted path takes one of these forms:
 *   1. /                              -> list all users
 *   2. /user                          -> list libraries owned by user
 *   3. /user/repo-id_name             -> list the root of the library
 *   4. /user/repo-id_name/repo_path   -> list library content
 *
 * On success the out-parameters hold newly allocated strings (or NULL
 * for the components that are absent); the caller frees them.
 * Returns 0 on success, -1 when the repo-id component is malformed.
 */
int parse_fuse_path (const char *path,
                     int *n_parts, char **user, char **repo_id, char **repo_path)
{
    char **comps;
    int count;
    int status = 0;

    *user = NULL;
    *repo_id = NULL;
    *repo_path = NULL;

    if (*path == '/')
        ++path;

    /* At most 3 components: user / repo / rest-of-path. */
    comps = g_strsplit (path, "/", 3);
    count = g_strv_length (comps);
    *n_parts = count;

    if (count == 1) {
        *user = g_strdup (comps[0]);
    } else if (count == 2 || count == 3) {
        *repo_id = parse_repo_id (comps[1]);
        if (*repo_id == NULL) {
            status = -1;
        } else {
            *user = g_strdup (comps[0]);
            *repo_path = (count == 2) ? g_strdup ("/")
                                      : g_strdup (comps[2]);
        }
    }
    /* count == 0: the root path; every out-parameter stays NULL. */

    g_strfreev (comps);
    return status;
}
/* FUSE getattr callback: clear the stat buffer, then let do_getattr()
 * fill it in from the repo metadata. */
static int seaf_fuse_getattr(const char *path, struct stat *stbuf)
{
    memset(stbuf, 0, sizeof *stbuf);
    return do_getattr(seaf, path, stbuf);
}
/* FUSE readdir callback: always emits "." and "..", then delegates the
 * per-path listing to do_readdir(). */
static int seaf_fuse_readdir(const char *path, void *buf,
fuse_fill_dir_t filler, off_t offset,
struct fuse_file_info *info)
{
filler(buf, ".", NULL, 0);
filler(buf, "..", NULL, 0);
return do_readdir(seaf, path, buf, filler, offset, info);
}
/*
 * FUSE open callback.  Validates that @path names a regular file inside
 * an existing repo.  Only O_RDONLY access is allowed.
 * Returns 0 on success or a negative errno value.
 */
static int seaf_fuse_open(const char *path, struct fuse_file_info *info)
{
    int n_parts;
    char *user = NULL, *repo_id = NULL, *repo_path = NULL;
    SeafRepo *repo = NULL;
    SeafBranch *branch = NULL;
    SeafCommit *commit = NULL;
    guint32 mode = 0;
    int ret = 0;

    /* Read-only filesystem: reject any write access mode. */
    if ((info->flags & 3) != O_RDONLY)
        return -EACCES;

    if (parse_fuse_path (path, &n_parts, &user, &repo_id, &repo_path) < 0) {
        seaf_warning ("Invalid input path %s.\n", path);
        return -ENOENT;
    }

    /* Only /user/repo and /user/repo/path can refer to files. */
    if (n_parts != 2 && n_parts != 3) {
        seaf_warning ("Invalid input path for open: %s.\n", path);
        ret = -EACCES;
        goto out;
    }

    repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);
    if (!repo) {
        seaf_warning ("Failed to get repo %s.\n", repo_id);
        ret = -ENOENT;
        goto out;
    }

    branch = repo->head;
    commit = seaf_commit_manager_get_commit(seaf->commit_mgr,
                                            repo->id,
                                            repo->version,
                                            branch->commit_id);
    if (!commit) {
        seaf_warning ("Failed to get commit %s:%.8s.\n", repo->id, branch->commit_id);
        ret = -ENOENT;
        goto out;
    }

    char *id = seaf_fs_manager_path_to_obj_id(seaf->fs_mgr,
                                              repo->store_id, repo->version,
                                              commit->root_id,
                                              repo_path, &mode, NULL);
    if (!id) {
        seaf_warning ("Path %s doesn't exist in repo %s.\n", repo_path, repo_id);
        ret = -ENOENT;
        goto out;
    }
    g_free (id);

    /* Only regular files can be opened.  BUGFIX: the original returned
     * directly here, leaking user/repo_id/repo_path and the repo/commit
     * references; route through `out` so everything is released. */
    if (!S_ISREG(mode)) {
        ret = -EACCES;
        goto out;
    }

out:
    g_free (user);
    g_free (repo_id);
    g_free (repo_path);
    /* NOTE(review): repo/commit may be NULL here; assumes the unref
     * helpers tolerate NULL, as the pre-existing error paths already
     * relied on -- confirm in repo-mgr.c / commit-mgr.c. */
    seaf_repo_unref (repo);
    seaf_commit_unref (commit);
    return ret;
}
/*
 * FUSE read callback.  Resolves @path to a Seafile file object and reads
 * up to @size bytes at @offset into @buf via read_file().
 * Returns the number of bytes read or a negative errno value.
 */
static int seaf_fuse_read(const char *path, char *buf, size_t size,
                          off_t offset, struct fuse_file_info *info)
{
    int n_parts;
    char *user = NULL, *repo_id = NULL, *repo_path = NULL;
    SeafRepo *repo = NULL;
    SeafBranch *branch = NULL;
    SeafCommit *commit = NULL;
    Seafile *file = NULL;
    char *file_id = NULL;
    int ret = 0;

    /* Read-only filesystem: reject any write access mode. */
    if ((info->flags & 3) != O_RDONLY)
        return -EACCES;

    if (parse_fuse_path (path, &n_parts, &user, &repo_id, &repo_path) < 0) {
        seaf_warning ("Invalid input path %s.\n", path);
        return -ENOENT;
    }

    /* Only /user/repo and /user/repo/path can refer to files.
     * BUGFIX: the warning used to say "for open" (copy-paste). */
    if (n_parts != 2 && n_parts != 3) {
        seaf_warning ("Invalid input path for read: %s.\n", path);
        ret = -EACCES;
        goto out;
    }

    repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);
    if (!repo) {
        seaf_warning ("Failed to get repo %s.\n", repo_id);
        ret = -ENOENT;
        goto out;
    }

    branch = repo->head;
    commit = seaf_commit_manager_get_commit(seaf->commit_mgr,
                                            repo->id,
                                            repo->version,
                                            branch->commit_id);
    if (!commit) {
        seaf_warning ("Failed to get commit %s:%.8s.\n", repo->id, branch->commit_id);
        ret = -ENOENT;
        goto out;
    }

    file_id = seaf_fs_manager_get_seafile_id_by_path(seaf->fs_mgr,
                                                     repo->store_id, repo->version,
                                                     commit->root_id,
                                                     repo_path, NULL);
    if (!file_id) {
        seaf_warning ("Path %s doesn't exist in repo %s.\n", repo_path, repo_id);
        ret = -ENOENT;
        goto out;
    }

    file = seaf_fs_manager_get_seafile(seaf->fs_mgr,
                                       repo->store_id, repo->version, file_id);
    if (!file) {
        ret = -ENOENT;
        goto out;
    }

    ret = read_file(seaf, repo->store_id, repo->version,
                    file, buf, size, offset, info);
    seafile_unref (file);

out:
    g_free (user);
    g_free (repo_id);
    g_free (repo_path);
    g_free (file_id);
    seaf_repo_unref (repo);
    seaf_commit_unref (commit);
    return ret;
}
/* Command-line option values collected by fuse_opt_parse(). */
struct options {
char *central_config_dir;
char *config_dir;
char *seafile_dir;
char *log_file;
} options;
/* Map an option template string to a field of struct options. */
#define SEAF_FUSE_OPT_KEY(t, p, v) { t, offsetof(struct options, p), v }
enum {
KEY_VERSION,
KEY_HELP,
};
/* Recognized switches; each -x form has a matching --long form. */
static struct fuse_opt seaf_fuse_opts[] = {
SEAF_FUSE_OPT_KEY("-c %s", config_dir, 0),
SEAF_FUSE_OPT_KEY("--config %s", config_dir, 0),
SEAF_FUSE_OPT_KEY("-F %s", central_config_dir, 0),
SEAF_FUSE_OPT_KEY("--central-config-dir %s", central_config_dir, 0),
SEAF_FUSE_OPT_KEY("-d %s", seafile_dir, 0),
SEAF_FUSE_OPT_KEY("--seafdir %s", seafile_dir, 0),
SEAF_FUSE_OPT_KEY("-l %s", log_file, 0),
SEAF_FUSE_OPT_KEY("--logfile %s", log_file, 0),
FUSE_OPT_KEY("-V", KEY_VERSION),
FUSE_OPT_KEY("--version", KEY_VERSION),
FUSE_OPT_KEY("-h", KEY_HELP),
FUSE_OPT_KEY("--help", KEY_HELP),
FUSE_OPT_END
};
/* Read-only operation table: only getattr/readdir/open/read are wired up. */
static struct fuse_operations seaf_fuse_ops = {
.getattr = seaf_fuse_getattr,
.readdir = seaf_fuse_readdir,
.open = seaf_fuse_open,
.read = seaf_fuse_read,
};
/* Entry point: parse options, set up logging and the seafile session,
 * then hand control to libfuse until the filesystem is unmounted. */
int main(int argc, char *argv[])
{
    struct fuse_args args = FUSE_ARGS_INIT(argc, argv);
    const char *debug_str = NULL;
    char *config_dir = NULL;
    char *central_config_dir = NULL;
    char *seafile_dir = NULL;
    char *logfile = NULL;
    char *ccnet_debug_level_str = "info";
    char *seafile_debug_level_str = "debug";
    int ret;

    memset(&options, 0, sizeof(struct options));
    if (fuse_opt_parse(&args, &options, seaf_fuse_opts, NULL) == -1) {
        seaf_warning("Parse argument Failed\n");
        exit(1);
    }

#if !GLIB_CHECK_VERSION(2,36,0)
    g_type_init();
#endif

    /* Fall back to the compiled-in default when -c/--config is absent. */
    if (options.config_dir)
        config_dir = options.config_dir;
    else
        config_dir = DEFAULT_CONFIG_DIR;
    config_dir = ccnet_expand_path (config_dir);

    central_config_dir = options.central_config_dir;

    /* Debug flags come only from the environment. */
    debug_str = g_getenv("SEAFILE_DEBUG");
    seafile_debug_set_flags_string(debug_str);

    if (options.seafile_dir)
        seafile_dir = options.seafile_dir;
    else
        seafile_dir = g_build_filename(config_dir, "seafile", NULL);

    if (options.log_file)
        logfile = options.log_file;
    else
        logfile = g_build_filename(seafile_dir, "seaf-fuse.log", NULL);

    if (seafile_log_init(logfile, ccnet_debug_level_str,
                         seafile_debug_level_str, "seaf-fuse") < 0) {
        fprintf (stderr, "Failed to init log.\n");
        exit(1);
    }

    seaf = seafile_session_new(central_config_dir, seafile_dir, config_dir);
    if (!seaf) {
        seaf_warning("Failed to create seafile session.\n");
        exit(1);
    }
    if (seafile_session_init(seaf) < 0) {
        seaf_warning("Failed to init seafile session.\n");
        exit(1);
    }
    set_syslog_config (seaf->config);

    /* Blocks until unmount; fuse's exit code is ours. */
    ret = fuse_main(args.argc, args.argv, &seaf_fuse_ops, NULL);
    fuse_opt_free_args(&args);
    return ret;
}
================================================
FILE: fuse/seaf-fuse.h
================================================
#ifndef SEAF_FUSE_H
#define SEAF_FUSE_H
#include "seafile-session.h"
/* Split a mounted path "/user/repo-id_name/repo_path" into its
 * components; see seaf-fuse.c for the accepted forms. */
int parse_fuse_path (const char *path,
int *n_parts, char **user, char **repo_id, char **repo_path);
SeafDirent *
fuse_get_dirent_by_path (SeafFSManager *mgr,
const char *repo_id,
int version,
const char *root_id,
const char *path);
/* file.c */
int read_file(SeafileSession *seaf, const char *store_id, int version,
Seafile *file, char *buf, size_t size,
off_t offset, struct fuse_file_info *info);
/* getattr.c */
int do_getattr(SeafileSession *seaf, const char *path, struct stat *stbuf);
/* readdir.c */
int do_readdir(SeafileSession *seaf, const char *path, void *buf,
fuse_fill_dir_t filler, off_t offset,
struct fuse_file_info *info);
#endif /* SEAF_FUSE_H */
================================================
FILE: fuse/seafile-session.c
================================================
#include "common.h"
#include
#include
#include
#include
#include
#include "seafile-session.h"
#include "seaf-utils.h"
#include "log.h"
static int
read_excluded_users (SeafileSession *session);
SeafileSession *
seafile_session_new(const char *central_config_dir,
const char *seafile_dir,
const char *ccnet_dir)
{
char *abs_central_config_dir = NULL;
char *abs_seafile_dir;
char *abs_ccnet_dir = NULL;
char *tmp_file_dir;
char *config_file_path;
struct stat st;
GKeyFile *config;
SeafileSession *session = NULL;
abs_ccnet_dir = ccnet_expand_path (ccnet_dir);
abs_seafile_dir = ccnet_expand_path (seafile_dir);
tmp_file_dir = g_build_filename(abs_seafile_dir, "tmpfiles", NULL);
if (central_config_dir) {
abs_central_config_dir = ccnet_expand_path (central_config_dir);
}
config_file_path = g_build_filename(
abs_central_config_dir ? abs_central_config_dir : abs_seafile_dir,
"seafile.conf", NULL);
if (g_stat(abs_seafile_dir, &st) < 0 || !S_ISDIR(st.st_mode)) {
seaf_warning ("Seafile data dir %s does not exist and is unable to create\n",
abs_seafile_dir);
goto onerror;
}
if (g_stat(tmp_file_dir, &st) < 0 || !S_ISDIR(st.st_mode)) {
seaf_warning("Seafile tmp dir %s does not exist and is unable to create\n",
tmp_file_dir);
goto onerror;
}
if (g_stat(abs_ccnet_dir, &st) < 0 || !S_ISDIR(st.st_mode)) {
seaf_warning("Ccnet dir %s does not exist and is unable to create\n",
abs_ccnet_dir);
goto onerror;
}
GError *error = NULL;
config = g_key_file_new ();
if (!g_key_file_load_from_file (config, config_file_path,
G_KEY_FILE_NONE, &error)) {
seaf_warning ("Failed to load config file.\n");
g_free (config_file_path);
g_key_file_free (config);
goto onerror;
}
g_free (config_file_path);
session = g_new0(SeafileSession, 1);
session->seaf_dir = abs_seafile_dir;
session->ccnet_dir = abs_ccnet_dir;
session->tmp_file_dir = tmp_file_dir;
session->config = config;
session->excluded_users = g_hash_table_new_full (g_str_hash, g_str_equal,
g_free, NULL);
if (load_database_config (session) < 0) {
seaf_warning ("Failed to load database config.\n");
goto onerror;
}
if (load_ccnet_database_config (session) < 0) {
seaf_warning ("Failed to load ccnet database config.\n");
goto onerror;
}
if (read_excluded_users (session) < 0) {
seaf_warning ("Failed to load excluded users.\n");
goto onerror;
}
session->fs_mgr = seaf_fs_manager_new (session, abs_seafile_dir);
if (!session->fs_mgr)
goto onerror;
session->block_mgr = seaf_block_manager_new (session, abs_seafile_dir);
if (!session->block_mgr)
goto onerror;
session->commit_mgr = seaf_commit_manager_new (session);
if (!session->commit_mgr)
goto onerror;
session->repo_mgr = seaf_repo_manager_new (session);
if (!session->repo_mgr)
goto onerror;
session->branch_mgr = seaf_branch_manager_new (session);
if (!session->branch_mgr)
goto onerror;
session->user_mgr = ccnet_user_manager_new (session);
if (!session->user_mgr)
goto onerror;
session->group_mgr = ccnet_group_manager_new (session);
if (!session->group_mgr)
goto onerror;
return session;
onerror:
free (abs_seafile_dir);
free (abs_ccnet_dir);
g_free (session);
return NULL;
}
/*
 * Load the "excluded_users" list ("fuse" section of seafile.conf, split
 * on spaces and commas) into session->excluded_users.  Always returns 0;
 * a missing option simply leaves the table empty.
 *
 * The table maps user name -> shared non-NULL sentinel, so membership
 * can be tested with g_hash_table_lookup().  BUGFIX: the sentinel was
 * allocated whenever the list was non-empty, even if every token turned
 * out to be blank (leaking it); allocate it lazily on first insert.
 */
static int
read_excluded_users (SeafileSession *session)
{
    char *users;
    int l, i;
    char *hash_value = NULL;

    users = seaf_key_file_get_string (session->config, "fuse", "excluded_users", NULL);
    if (!users)
        return 0;

    char **parts = g_strsplit_set(users, " ,", 0);
    l = g_strv_length(parts);
    for (i = 0; i < l; i++) {
        if (g_strcmp0(parts[i], "") == 0)
            continue;
        /* One shared sentinel value for all keys; never freed (the table
         * has no value-destroy function), which is intentional. */
        if (!hash_value)
            hash_value = g_new0(char, 1);
        g_hash_table_insert (session->excluded_users, g_strdup(parts[i]), hash_value);
    }

    g_strfreev (parts);
    g_free (users);
    return 0;
}
/* Initialize all managers of @session.  Returns 0 on success, -1 as soon
 * as any manager fails; the user/group failures are logged explicitly. */
int
seafile_session_init (SeafileSession *session)
{
    /* Short-circuit evaluation keeps the original init order:
     * commit -> fs -> branch -> repo. */
    if (seaf_commit_manager_init (session->commit_mgr) < 0 ||
        seaf_fs_manager_init (session->fs_mgr) < 0 ||
        seaf_branch_manager_init (session->branch_mgr) < 0 ||
        seaf_repo_manager_init (session->repo_mgr) < 0)
        return -1;

    if (ccnet_user_manager_prepare (session->user_mgr) < 0) {
        seaf_warning ("Failed to init user manager.\n");
        return -1;
    }
    if (ccnet_group_manager_prepare (session->group_mgr) < 0) {
        seaf_warning ("Failed to init group manager.\n");
        return -1;
    }

    return 0;
}
/* Start the session's background services.  The FUSE client has none to
 * start, so this is a no-op that always reports success. */
int
seafile_session_start (SeafileSession *session)
{
return 0;
}
================================================
FILE: fuse/seafile-session.h
================================================
#ifndef SEAFILE_SESSION_H
#define SEAFILE_SESSION_H
#include
#include
#include
#include "block-mgr.h"
#include "fs-mgr.h"
#include "branch-mgr.h"
#include "commit-mgr.h"
#include "repo-mgr.h"
#include "user-mgr.h"
#include "group-mgr.h"
#include "org-mgr.h"
typedef struct _SeafileSession SeafileSession;
/* Process-wide state for seaf-fuse: resolved directories, the parsed
 * seafile.conf, database handles and one instance of each manager. */
struct _SeafileSession {
char *seaf_dir;
char *ccnet_dir;
char *tmp_file_dir;
/* Config that's only loaded on start */
GKeyFile *config;
SeafDB *db;
SeafDB *ccnet_db;
SeafDB *seahub_db;
SeafBlockManager *block_mgr;
SeafFSManager *fs_mgr;
SeafBranchManager *branch_mgr;
SeafCommitManager *commit_mgr;
SeafRepoManager *repo_mgr;
CcnetUserManager *user_mgr;
CcnetGroupManager *group_mgr;
CcnetOrgManager *org_mgr;
/* Keys are user names to hide from listings ("fuse"/"excluded_users"
 * in seafile.conf); values are a shared non-NULL sentinel. */
GHashTable *excluded_users;
gboolean create_tables;
gboolean ccnet_create_tables;
};
extern SeafileSession *seaf;
SeafileSession *
seafile_session_new(const char *central_config_dir,
const char *seafile_dir,
const char *ccnet_dir);
int
seafile_session_init (SeafileSession *session);
int
seafile_session_start (SeafileSession *session);
#endif
================================================
FILE: include/Makefile.am
================================================
noinst_HEADERS = seafile-rpc.h seafile-error.h
================================================
FILE: include/seafile-error.h
================================================
#ifndef SEAFILE_ERROR_H
#define SEAFILE_ERROR_H
/* Error codes shared by the RPC layer and the file server.
 * Values start at 500 and are assigned sequentially. */
#define SEAF_ERR_GENERAL 500
#define SEAF_ERR_BAD_REPO 501
#define SEAF_ERR_BAD_COMMIT 502
#define SEAF_ERR_BAD_ARGS 503
#define SEAF_ERR_INTERNAL 504
#define SEAF_ERR_BAD_FILE 505
#define SEAF_ERR_BAD_RELAY 506
#define SEAF_ERR_LIST_COMMITS 507
#define SEAF_ERR_REPO_AUTH 508
#define SEAF_ERR_GC_NOT_STARTED 509
#define SEAF_ERR_MONITOR_NOT_CONNECTED 510
#define SEAF_ERR_BAD_DIR_ID 511
#define SEAF_ERR_NO_WORKTREE 512
#define SEAF_ERR_BAD_PEER_ID 513
#define SEAF_ERR_REPO_LOCKED 514
#define SEAF_ERR_DIR_MISSING 515
#define SEAF_ERR_PATH_NO_EXIST 516 /* the dir or file pointed by this path not exists */
/* Upload-specific errors */
#define POST_FILE_ERR_FILENAME 517
#define POST_FILE_ERR_BLOCK_MISSING 518
#define POST_FILE_ERR_QUOTA_FULL 519
#define SEAF_ERR_CONCURRENT_UPLOAD 520
#define SEAF_ERR_FILES_WITH_SAME_NAME 521
#define SEAF_ERR_GC_CONFLICT 522
#endif
================================================
FILE: include/seafile-rpc.h
================================================
#ifndef _SEAFILE_RPC_H
#define _SEAFILE_RPC_H
#include "seafile-object.h"
/**
* seafile_get_session_info:
*
* Returns: a SeafileSessionInfo object.
*/
GObject *
seafile_get_session_info (GError **error);
/**
* seafile_get_repo_list:
*
* Returns repository list.
*/
GList* seafile_get_repo_list (int start, int limit, const char *order_by, int ret_virt_repo, GError **error);
gint64
seafile_count_repos (GError **error);
/**
* seafile_get_trash_repo_list:
*
* Returns deleted repository list.
*/
GList* seafile_get_trash_repo_list(int start, int limit, GError **error);
int
seafile_del_repo_from_trash (const char *repo_id, GError **error);
int
seafile_restore_repo_from_trash (const char *repo_id, GError **error);
GList *
seafile_get_trash_repos_by_owner (const char *owner, GError **error);
int
seafile_empty_repo_trash (GError **error);
int
seafile_empty_repo_trash_by_owner (const char *owner, GError **error);
/**
* seafile_get_commit_list:
*
* @limit: if limit <= 0, all commits start from @offset will be returned.
*
* Returns: commit list of a given repo.
*
* Possible Error:
* 1. Bad Argument
* 2. No head and branch master
* 3. Failed to list commits
*/
GList* seafile_get_commit_list (const gchar *repo,
int offset,
int limit,
GError **error);
/**
* seafile_get_commit:
* @id: the commit id.
*
* Returns: the commit object.
*/
GObject* seafile_get_commit (const char *repo_id, int version,
const gchar *id, GError **error);
/**
* seafile_get_repo:
*
* Returns: repo
*/
GObject* seafile_get_repo (const gchar* id, GError **error);
GObject *
seafile_get_repo_sync_task (const char *repo_id, GError **error);
/**
* seafile_get_repo_sync_info:
*/
GObject *
seafile_get_repo_sync_info (const char *repo_id, GError **error);
GList*
seafile_get_repo_sinfo (const char *repo_id, GError **error);
/* [seafile_get_config] returns the value of the config entry whose name is
* [key] in config.db
*/
char *seafile_get_config (const char *key, GError **error);
/* [seafile_set_config] set the value of config key in config.db; old value
* would be overwritten. */
int seafile_set_config (const char *key, const char *value, GError **error);
int
seafile_set_config_int (const char *key, int value, GError **error);
int
seafile_get_config_int (const char *key, GError **error);
int
seafile_set_upload_rate_limit (int limit, GError **error);
int
seafile_set_download_rate_limit (int limit, GError **error);
/**
* seafile_destroy_repo:
* @repo_id: repository id.
*/
int seafile_destroy_repo (const gchar *repo_id, GError **error);
int
seafile_unsync_repos_by_account (const char *server_addr, const char *email, GError **error);
int
seafile_remove_repo_tokens_by_account (const char *server_addr, const char *email, GError **error);
int
seafile_set_repo_token (const char *repo_id, const char *token, GError **error);
int
seafile_get_download_rate(GError **error);
int
seafile_get_upload_rate(GError **error);
/**
* seafile_edit_repo:
* @repo_id: repository id.
* @name: new name of the repository, NULL if unchanged.
* @description: new description of the repository, NULL if unchanged.
*/
int seafile_edit_repo (const gchar *repo_id,
const gchar *name,
const gchar *description,
const gchar *user,
GError **error);
int
seafile_change_repo_passwd (const char *repo_id,
const char *old_passwd,
const char *new_passwd,
const char *user,
GError **error);
int
seafile_upgrade_repo_pwd_hash_algorithm (const char *repo_id,
const char *user,
const char *passwd,
const char *pwd_hash_algo,
const char *pwd_hash_params,
GError **error);
/**
* seafile_repo_size:
*
* Returns: the size of a repo
*
* Possible Error:
* 1. Bad Argument
* 2. No local branch (No local branch record in branch.db)
* 3. Database error
* 4. Calculate branch size error
*/
gint64
seafile_repo_size(const gchar *repo_id, GError **error);
int
seafile_repo_last_modify(const char *repo_id, GError **error);
int seafile_set_repo_lantoken (const gchar *repo_id,
const gchar *token,
GError **error);
gchar* seafile_get_repo_lantoken (const gchar *repo_id,
GError **error);
int
seafile_set_repo_property (const char *repo_id,
const char *key,
const char *value,
GError **error);
gchar *
seafile_get_repo_property (const char *repo_id,
const char *key,
GError **error);
char *
seafile_get_repo_relay_address (const char *repo_id,
GError **error);
char *
seafile_get_repo_relay_port (const char *repo_id,
GError **error);
int
seafile_update_repo_relay_info (const char *repo_id,
const char *new_addr,
const char *new_port,
GError **error);
int
seafile_update_repos_server_host (const char *old_host,
const char *new_host,
const char *new_server_url,
GError **error);
int seafile_disable_auto_sync (GError **error);
int seafile_enable_auto_sync (GError **error);
int seafile_is_auto_sync_enabled (GError **error);
char *
seafile_get_path_sync_status (const char *repo_id,
const char *path,
int is_dir,
GError **error);
int
seafile_mark_file_locked (const char *repo_id, const char *path, GError **error);
int
seafile_mark_file_unlocked (const char *repo_id, const char *path, GError **error);
char *
seafile_get_server_property (const char *server_url, const char *key, GError **error);
int
seafile_set_server_property (const char *server_url,
const char *key,
const char *value,
GError **error);
/**
* seafile_list_dir:
* List a directory.
*
* Returns: a list of dirents.
*
* @limit: if limit <= 0, all dirents start from @offset will be returned.
*/
GList * seafile_list_dir (const char *repo_id,
const char *dir_id, int offset, int limit, GError **error);
/**
* seafile_list_file_blocks:
* List the blocks of a file.
*
* Returns: a list of block ids separated by '\n'.
*
* @limit: if limit <= 0, all blocks start from @offset will be returned.
*/
char * seafile_list_file_blocks (const char *repo_id,
const char *file_id,
int offset, int limit,
GError **error);
/**
* seafile_list_dir_by_path:
* List a directory in a commit by the path of the directory.
*
* Returns: a list of dirents.
*/
GList * seafile_list_dir_by_path (const char *repo_id,
const char *commit_id, const char *path, GError **error);
/**
* seafile_get_dir_id_by_commit_and_path:
* Get the dir_id of the path
*
* Returns: the dir_id of the path
*/
char * seafile_get_dir_id_by_commit_and_path (const char *repo_id,
const char *commit_id,
const char *path,
GError **error);
/**
* seafile_revert:
* Reset the repo to a previous state by creating a new commit.
*/
int seafile_revert (const char *repo_id, const char *commit, GError **error);
char *
seafile_gen_default_worktree (const char *worktree_parent,
const char *repo_name,
GError **error);
int
seafile_check_path_for_clone(const char *path, GError **error);
/**
* seafile_clone:
*
* Fetch a new repo and then check it out.
*/
char *
seafile_clone (const char *repo_id,
int repo_version,
const char *peer_id,
const char *repo_name,
const char *worktree,
const char *token,
const char *passwd,
const char *magic,
const char *peer_addr,
const char *peer_port,
const char *email,
const char *random_key,
int enc_version,
const char *more_info,
GError **error);
char *
seafile_download (const char *repo_id,
int repo_version,
const char *peer_id,
const char *repo_name,
const char *wt_parent,
const char *token,
const char *passwd,
const char *magic,
const char *peer_addr,
const char *peer_port,
const char *email,
const char *random_key,
int enc_version,
const char *more_info,
GError **error);
int
seafile_cancel_clone_task (const char *repo_id, GError **error);
int
seafile_remove_clone_task (const char *repo_id, GError **error);
/**
* seafile_get_clone_tasks:
*
* Get a list of clone tasks.
*/
GList *
seafile_get_clone_tasks (GError **error);
/**
* seafile_sync:
*
* Sync a repo with relay.
*/
int seafile_sync (const char *repo_id, const char *peer_id, GError **error);
/**
* seafile_get_total_block_size:
*
* Get the sum of size of all the blocks.
*/
gint64
seafile_get_total_block_size (GError **error);
/**
* seafile_get_commit_tree_block_number:
*
* Get the number of blocks belong to the commit tree.
*
* @commit_id: the head of the commit tree.
*
* Returns: -1 if the calculation is in progress, -2 if error, >=0 otherwise.
*/
int
seafile_get_commit_tree_block_number (const char *commit_id, GError **error);
/**
* seafile_gc:
* Start garbage collection.
*/
int
seafile_gc (GError **error);
/**
* seafile_gc_get_progress:
* Get progress of GC.
*
* Returns:
* progress of GC in percentage.
* -1 if GC is not running.
*/
/* int */
/* seafile_gc_get_progress (GError **error); */
/* ----------------- Task Related -------------- */
/**
* seafile_find_transfer:
*
* Find a non finished task of a repo
*/
GObject *
seafile_find_transfer_task (const char *repo_id, GError *error);
int seafile_cancel_task (const gchar *task_id, int task_type, GError **error);
/**
* Remove finished upload task
*/
int seafile_remove_task (const char *task_id, int task_type, GError **error);
/* ------------------ Relay specific RPC calls. ------------ */
/**
* seafile_diff:
*
* Show the difference between @old commit and @new commit. If @old is NULL, then
* show the difference between @new commit and its parent.
*
* @old and @new can also be branch name.
*/
GList *
seafile_diff (const char *repo_id, const char *old, const char *new,
int fold_dir_results, GError **error);
GList *
seafile_branch_gets (const char *repo_id, GError **error);
/**
* Return 1 if user is the owner of repo, otherwise return 0.
*/
int
seafile_is_repo_owner (const char *email, const char *repo_id,
GError **error);
int
seafile_set_repo_owner(const char *repo_id, const char *email,
GError **error);
/**
* Return owner id of repo
*/
char *
seafile_get_repo_owner(const char *repo_id, GError **error);
GList *
seafile_get_orphan_repo_list(GError **error);
GList *
seafile_list_owned_repos (const char *email, int ret_corrupted, int start, int limit,
GError **error);
GList *
seafile_search_repos_by_name(const char *name, GError **error);
/**
* seafile_add_chunk_server:
* @server: ID for the chunk server.
*
* Add a chunk server on a relay server.
*/
int seafile_add_chunk_server (const char *server, GError **error);
/**
* seafile_del_chunk_server:
* @server: ID for the chunk server.
*
* Delete a chunk server on a relay server.
*/
int seafile_del_chunk_server (const char *server, GError **error);
/**
* seafile_list_chunk_servers:
*
* List chunk servers set on a relay server.
*/
char *seafile_list_chunk_servers (GError **error);
gint64 seafile_get_user_quota_usage (const char *email, GError **error);
gint64 seafile_get_user_share_usage (const char *email, GError **error);
gint64
seafile_server_repo_size(const char *repo_id, GError **error);
int
seafile_repo_set_access_property (const char *repo_id, const char *ap,
GError **error);
char *
seafile_repo_query_access_property (const char *repo_id, GError **error);
char *
seafile_web_get_access_token (const char *repo_id,
const char *obj_id,
const char *op,
const char *username,
int use_onetime,
GError **error);
GObject *
seafile_web_query_access_token (const char *token, GError **error);
char *
seafile_query_zip_progress (const char *token, GError **error);
int
seafile_cancel_zip_task (const char *token, GError **error);
GObject *
seafile_get_checkout_task (const char *repo_id, GError **error);
GList *
seafile_get_sync_task_list (GError **error);
char *
seafile_share_subdir_to_user (const char *repo_id,
const char *path,
const char *owner,
const char *share_user,
const char *permission,
const char *passwd,
GError **error);
int
seafile_unshare_subdir_for_user (const char *repo_id,
const char *path,
const char *owner,
const char *share_user,
GError **error);
int
seafile_update_share_subdir_perm_for_user (const char *repo_id,
const char *path,
const char *owner,
const char *share_user,
const char *permission,
GError **error);
int
seafile_add_share (const char *repo_id, const char *from_email,
const char *to_email, const char *permission,
GError **error);
GList *
seafile_list_share_repos (const char *email, const char *type,
int start, int limit, GError **error);
GList *
seafile_list_repo_shared_to (const char *from_user, const char *repo_id,
GError **error);
GList *
seafile_list_repo_shared_group (const char *from_user, const char *repo_id,
GError **error);
int
seafile_remove_share (const char *repo_id, const char *from_email,
const char *to_email, GError **error);
char *
seafile_share_subdir_to_group (const char *repo_id,
const char *path,
const char *owner,
int share_group,
const char *permission,
const char *passwd,
GError **error);
int
seafile_unshare_subdir_for_group (const char *repo_id,
const char *path,
const char *owner,
int share_group,
GError **error);
int
seafile_update_share_subdir_perm_for_group (const char *repo_id,
const char *path,
const char *owner,
int share_group,
const char *permission,
GError **error);
int
seafile_group_share_repo (const char *repo_id, int group_id,
const char *user_name, const char *permission,
GError **error);
int
seafile_group_unshare_repo (const char *repo_id, int group_id,
const char *user_name, GError **error);
/* Get groups that a repo is shared to */
char *
seafile_get_shared_groups_by_repo(const char *repo_id, GError **error);
char *
seafile_get_group_repoids (int group_id, GError **error);
GList *
seafile_get_repos_by_group (int group_id, GError **error);
GList *
seafile_get_group_repos_by_owner (char *user, GError **error);
char *
seafile_get_group_repo_owner (const char *repo_id, GError **error);
int
seafile_remove_repo_group(int group_id, const char *username, GError **error);
gint64
seafile_get_file_size (const char *store_id, int version,
const char *file_id, GError **error);
gint64
seafile_get_dir_size (const char *store_id, int version,
const char *dir_id, GError **error);
int
seafile_set_repo_history_limit (const char *repo_id,
int days,
GError **error);
int
seafile_get_repo_history_limit (const char *repo_id,
GError **error);
int
seafile_set_repo_valid_since (const char *repo_id,
gint64 timestamp,
GError **error);
int
seafile_check_passwd (const char *repo_id,
const char *magic,
GError **error);
int
seafile_set_passwd (const char *repo_id,
const char *user,
const char *passwd,
GError **error);
int
seafile_unset_passwd (const char *repo_id,
const char *user,
GError **error);
int
seafile_is_passwd_set (const char *repo_id, const char *user, GError **error);
GObject *
seafile_get_decrypt_key (const char *repo_id, const char *user, GError **error);
int
seafile_revert_on_server (const char *repo_id,
const char *commit_id,
const char *user_name,
GError **error);
/**
* Add a file into the repo on server.
* The content of the file is stored in a temporary file.
* @repo_id: repo id
* @temp_file_path: local file path, should be a temp file just uploaded.
* @parent_dir: the parent directory to put the file in.
* @file_name: the name of the target file.
* @user: the email of the user who uploaded the file.
*/
int
seafile_post_file (const char *repo_id, const char *temp_file_path,
const char *parent_dir, const char *file_name,
const char *user,
GError **error);
/**
* Add multiple files at once.
*
* @filenames_json: json array of filenames
* @paths_json: json array of temp file paths
*/
char *
seafile_post_multi_files (const char *repo_id,
const char *parent_dir,
const char *filenames_json,
const char *paths_json,
const char *user,
int replace,
GError **error);
/**
* Add file blocks at once.
*
* @blocks_json: json array of block ids
* @paths_json: json array of temp file paths
*/
/* char * */
/* seafile_post_file_blocks (const char *repo_id, */
/* const char *parent_dir, */
/* const char *file_name, */
/* const char *blockids_json, */
/* const char *paths_json, */
/* const char *user, */
/* gint64 file_size, */
/* int replace_existed, */
/* GError **error); */
int
seafile_post_empty_file (const char *repo_id, const char *parent_dir,
const char *new_file_name, const char *user,
GError **error);
/**
* Update an existing file in a repo
* @params: same as seafile_post_file
* @head_id: the commit id for the original file version.
* It's optional. If it's NULL, the current repo head will be used.
* @return The new file id
*/
char *
seafile_put_file (const char *repo_id, const char *temp_file_path,
const char *parent_dir, const char *file_name,
const char *user, const char *head_id,
GError **error);
/**
* Add file blocks at once.
*
* @blocks_json: json array of block ids
* @paths_json: json array of temp file paths
*/
/* char * */
/* seafile_put_file_blocks (const char *repo_id, const char *parent_dir, */
/* const char *file_name, const char *blockids_json, */
/* const char *paths_json, const char *user, */
/* const char *head_id, gint64 file_size, GError **error); */
int
seafile_post_dir (const char *repo_id, const char *parent_dir,
const char *new_dir_name, const char *user,
GError **error);
int
seafile_mkdir_with_parents (const char *repo_id, const char *parent_dir,
const char *new_dir_path, const char *user,
GError **error);
/**
 * delete a file/directory from the repo on server.
 * @repo_id: repo id
 * @parent_dir: the parent directory of the file to be deleted
 * @file_name: the name of the target file.
 * @user: the email of the user who requested the deletion.
 */
int
seafile_del_file (const char *repo_id,
const char *parent_dir, const char *file_name,
const char *user,
GError **error);
int
seafile_batch_del_files (const char *repo_id,
const char *file_list,
const char *user,
GError **error);
/**
* copy a file/directory from a repo to another on server.
*/
GObject *
seafile_copy_file (const char *src_repo_id,
const char *src_dir,
const char *src_filename,
const char *dst_repo_id,
const char *dst_dir,
const char *dst_filename,
const char *user,
int need_progress,
int synchronous,
GError **error);
GObject *
seafile_move_file (const char *src_repo_id,
const char *src_dir,
const char *src_filename,
const char *dst_repo_id,
const char *dst_dir,
const char *dst_filename,
int replace,
const char *user,
int need_progress,
int synchronous,
GError **error);
GObject *
seafile_get_copy_task (const char *task_id, GError **error);
int
seafile_cancel_copy_task (const char *task_id, GError **error);
int
seafile_rename_file (const char *repo_id,
const char *parent_dir,
const char *oldname,
const char *newname,
const char *user,
GError **error);
/**
* Return non-zero if filename is valid.
*/
int
seafile_is_valid_filename (const char *repo_id,
const char *filename,
GError **error);
int
seafile_set_user_quota (const char *user, gint64 quota, GError **error);
gint64
seafile_get_user_quota (const char *user, GError **error);
int
seafile_check_quota (const char *repo_id, gint64 delta, GError **error);
GList *
seafile_list_user_quota_usage (GError **error);
char *
seafile_get_file_id_by_path (const char *repo_id, const char *path,
GError **error);
char *
seafile_get_dir_id_by_path (const char *repo_id, const char *path,
GError **error);
GObject *
seafile_get_dirent_by_path (const char *repo_id, const char *path,
GError **error);
/**
* Return a list of commits where every commit contains a unique version of
* the file.
*/
GList *
seafile_list_file_revisions (const char *repo_id,
const char *commit_id,
const char *path,
int limit,
GError **error);
GList *
seafile_calc_files_last_modified (const char *repo_id,
const char *parent_dir,
int limit,
GError **error);
int
seafile_revert_file (const char *repo_id,
const char *commit_id,
const char *path,
const char *user,
GError **error);
int
seafile_revert_dir (const char *repo_id,
const char *commit_id,
const char *path,
const char *user,
GError **error);
char *
seafile_check_repo_blocks_missing (const char *repo_id,
const char *blockids_json,
GError **error);
/*
 * @show_days: only return files deleted within the last @show_days days;
 * return all deleted files if 0.
 */
GList *
seafile_get_deleted (const char *repo_id, int show_days,
const char *path, const char *scan_stat,
int limit, GError **error);
/**
* Generate a new token for (repo_id, email) and return it
*/
char *
seafile_generate_repo_token (const char *repo_id,
const char *email,
GError **error);
int
seafile_delete_repo_token (const char *repo_id,
const char *token,
const char *user,
GError **error);
GList *
seafile_list_repo_tokens (const char *repo_id,
GError **error);
GList *
seafile_list_repo_tokens_by_email (const char *email,
GError **error);
int
seafile_delete_repo_tokens_by_peer_id(const char *email, const char *peer_id, GError **error);
int
seafile_delete_repo_tokens_by_email (const char *email,
GError **error);
/**
* create a repo on seahub
*/
char *
seafile_create_repo (const char *repo_name,
const char *repo_desc,
const char *owner_email,
const char *passwd,
int enc_version,
const char *pwd_hash_algo,
const char *pwd_hash_params,
GError **error);
char *
seafile_create_enc_repo (const char *repo_id,
const char *repo_name,
const char *repo_desc,
const char *owner_email,
const char *magic,
const char *random_key,
const char *salt,
int enc_version,
const char *pwd_hash,
const char *pwd_hash_algo,
const char *pwd_hash_params,
GError **error);
char *
seafile_check_permission (const char *repo_id, const char *user, GError **error);
char *
seafile_check_permission_by_path (const char *repo_id, const char *path,
const char *user, GError **error);
GList *
seafile_list_dir_with_perm (const char *repo_id,
const char *path,
const char *dir_id,
const char *user,
int offset,
int limit,
GError **error);
int
seafile_set_inner_pub_repo (const char *repo_id,
const char *permission,
GError **error);
int
seafile_unset_inner_pub_repo (const char *repo_id, GError **error);
GList *
seafile_list_inner_pub_repos (GError **error);
gint64
seafile_count_inner_pub_repos (GError **error);
GList *
seafile_list_inner_pub_repos_by_owner (const char *user, GError **error);
int
seafile_is_inner_pub_repo (const char *repo_id, GError **error);
int
seafile_set_share_permission (const char *repo_id,
const char *from_email,
const char *to_email,
const char *permission,
GError **error);
int
seafile_set_group_repo_permission (int group_id,
const char *repo_id,
const char *permission,
GError **error);
char *
seafile_get_file_id_by_commit_and_path(const char *repo_id,
const char *commit_id,
const char *path,
GError **error);
/* virtual repo related */
char *
seafile_create_virtual_repo (const char *origin_repo_id,
const char *path,
const char *repo_name,
const char *repo_desc,
const char *owner,
const char *passwd,
GError **error);
GList *
seafile_get_virtual_repos_by_owner (const char *owner, GError **error);
GObject *
seafile_get_virtual_repo (const char *origin_repo,
const char *path,
const char *owner,
GError **error);
char *
seafile_get_system_default_repo_id (GError **error);
/* Clean trash */
int
seafile_clean_up_repo_history (const char *repo_id, int keep_days, GError **error);
/* ------------------ public RPC calls. ------------ */
GList* seafile_get_repo_list_pub (int start, int limit, GError **error);
GObject* seafile_get_repo_pub (const gchar* id, GError **error);
GList* seafile_get_commit_list_pub (const gchar *repo,
int offset,
int limit,
GError **error);
GObject* seafile_get_commit_pub (const gchar *id, GError **error);
char *seafile_diff_pub (const char *repo_id, const char *old, const char *new,
GError **error);
GList * seafile_list_dir_pub (const char *dir_id, GError **error);
GList *
seafile_get_shared_users_for_subdir (const char *repo_id,
const char *path,
const char *from_user,
GError **error);
GList *
seafile_get_shared_groups_for_subdir (const char *repo_id,
const char *path,
const char *from_user,
GError **error);
GObject *
seafile_generate_magic_and_random_key(int enc_version,
const char* repo_id,
const char *passwd,
GError **error);
gint64
seafile_get_total_file_number (GError **error);
gint64
seafile_get_total_storage (GError **error);
GObject *
seafile_get_file_count_info_by_path (const char *repo_id,
const char *path,
GError **error);
char *
seafile_get_trash_repo_owner (const char *repo_id, GError **error);
int
seafile_set_server_config_int (const char *group, const char *key, int value, GError **error);
int
seafile_get_server_config_int (const char *group, const char *key, GError **error);
int
seafile_set_server_config_int64 (const char *group, const char *key, gint64 value, GError **error);
gint64
seafile_get_server_config_int64 (const char *group, const char *key, GError **error);
int
seafile_set_server_config_string (const char *group, const char *key, const char *value, GError **error);
char *
seafile_get_server_config_string (const char *group, const char *key, GError **error);
int
seafile_set_server_config_boolean (const char *group, const char *key, int value, GError **error);
int
seafile_get_server_config_boolean (const char *group, const char *key, GError **error);
GObject *
seafile_get_group_shared_repo_by_path (const char *repo_id,
const char *path,
int group_id,
int is_org,
GError **error);
GObject *
seafile_get_shared_repo_by_path (const char *repo_id,
const char *path,
const char *shared_to,
int is_org,
GError **error);
GList *
seafile_get_group_repos_by_user (const char *user, GError **error);
GList *
seafile_get_org_group_repos_by_user (const char *user, int org_id, GError **error);
int
seafile_repo_has_been_shared (const char *repo_id, int including_groups, GError **error);
GList *
seafile_get_shared_users_by_repo (const char *repo_id, GError **error);
GList *
seafile_org_get_shared_users_by_repo (int org_id,
const char *repo_id,
GError **error);
gint64
seafile_get_upload_tmp_file_offset (const char *repo_id, const char *file_path,
GError **error);
char *
seafile_convert_repo_path (const char *repo_id,
const char *path,
const char *user,
int is_org,
GError **error);
int
seafile_set_repo_status(const char *repo_id, int status, GError **error);
int
seafile_get_repo_status(const char *repo_id, GError **error);
GList*
seafile_get_repos_by_id_prefix (const char *id_prefix, int start,
int limit, GError **error);
int
seafile_publish_event(const char *channel, const char *content, GError **error);
json_t *
seafile_pop_event(const char *channel, GError **error);
GList *
seafile_search_files (const char *repo_id, const char *str, GError **error);
GList *
seafile_search_files_by_path (const char *repo_id, const char *path, const char *str, GError **error);
/*Following is ccnet rpc*/
int
ccnet_rpc_add_emailuser (const char *email, const char *passwd,
int is_staff, int is_active, GError **error);
int
ccnet_rpc_remove_emailuser (const char *source, const char *email, GError **error);
int
ccnet_rpc_validate_emailuser (const char *email, const char *passwd, GError **error);
GObject*
ccnet_rpc_get_emailuser (const char *email, GError **error);
GObject*
ccnet_rpc_get_emailuser_with_import (const char *email, GError **error);
GObject*
ccnet_rpc_get_emailuser_by_id (int id, GError **error);
GList*
ccnet_rpc_get_emailusers (const char *source, int start, int limit, const char *status, GError **error);
GList*
ccnet_rpc_search_emailusers (const char *source,
const char *email_patt,
int start, int limit,
GError **error);
GList*
ccnet_rpc_search_ldapusers (const char *keyword,
int start, int limit,
GError **error);
/* Get total counts of email users. */
gint64
ccnet_rpc_count_emailusers (const char *source, GError **error);
gint64
ccnet_rpc_count_inactive_emailusers (const char *source, GError **error);
int
ccnet_rpc_update_emailuser (const char *source, int id, const char* passwd,
int is_staff, int is_active,
GError **error);
int
ccnet_rpc_update_role_emailuser (const char* email, const char* role, GError **error);
GList*
ccnet_rpc_get_superusers (GError **error);
GList *
ccnet_rpc_get_emailusers_in_list(const char *source, const char *user_list, GError **error);
int
ccnet_rpc_update_emailuser_id (const char *old_email, const char *new_email, GError **error);
int
ccnet_rpc_create_group (const char *group_name, const char *user_name,
const char *type, int parent_group_id, GError **error);
int
ccnet_rpc_create_org_group (int org_id, const char *group_name,
const char *user_name, int parent_group_id, GError **error);
int
ccnet_rpc_remove_group (int group_id, GError **error);
int
ccnet_rpc_group_add_member (int group_id, const char *user_name,
const char *member_name, GError **error);
int
ccnet_rpc_group_remove_member (int group_id, const char *user_name,
const char *member_name, GError **error);
int
ccnet_rpc_group_set_admin (int group_id, const char *member_name,
GError **error);
int
ccnet_rpc_group_unset_admin (int group_id, const char *member_name,
GError **error);
int
ccnet_rpc_set_group_name (int group_id, const char *group_name,
GError **error);
int
ccnet_rpc_quit_group (int group_id, const char *user_name, GError **error);
GList *
ccnet_rpc_get_groups (const char *username, int return_ancestors, GError **error);
GList *
ccnet_rpc_list_all_departments (GError **error);
GList *
ccnet_rpc_get_all_groups (int start, int limit, const char *source, GError **error);
GList *
ccnet_rpc_get_ancestor_groups (int group_id, GError **error);
GList *
ccnet_rpc_get_top_groups (int including_org, GError **error);
GList *
ccnet_rpc_get_child_groups (int group_id, GError **error);
GList *
ccnet_rpc_get_descendants_groups(int group_id, GError **error);
GObject *
ccnet_rpc_get_group (int group_id, GError **error);
GList *
ccnet_rpc_get_group_members (int group_id, int start, int limit, GError **error);
GList *
ccnet_rpc_get_members_with_prefix(int group_id, const char *prefix, GError **error);
int
ccnet_rpc_check_group_staff (int group_id, const char *user_name, int in_structure,
GError **error);
int
ccnet_rpc_remove_group_user (const char *user, GError **error);
int
ccnet_rpc_is_group_user (int group_id, const char *user, int in_structure, GError **error);
int
ccnet_rpc_set_group_creator (int group_id, const char *user_name,
GError **error);
GList*
ccnet_rpc_search_groups (const char *group_patt,
int start, int limit,
GError **error);
GList *
ccnet_rpc_get_groups_members (const char *group_ids, GError **error);
GList *
ccnet_rpc_search_group_members (int group_id, const char *pattern, GError **error);
int
ccnet_rpc_create_org (const char *org_name, const char *url_prefix,
const char *creator, GError **error);
int
ccnet_rpc_remove_org (int org_id, GError **error);
GList *
ccnet_rpc_get_all_orgs (int start, int limit, GError **error);
gint64
ccnet_rpc_count_orgs (GError **error);
GObject *
ccnet_rpc_get_org_by_url_prefix (const char *url_prefix, GError **error);
GObject *
ccnet_rpc_get_org_by_id (int org_id, GError **error);
int
ccnet_rpc_add_org_user (int org_id, const char *email, int is_staff,
GError **error);
int
ccnet_rpc_remove_org_user (int org_id, const char *email, GError **error);
GList *
ccnet_rpc_get_orgs_by_user (const char *email, GError **error);
GList *
ccnet_rpc_get_org_emailusers (const char *url_prefix, int start , int limit,
GError **error);
int
ccnet_rpc_add_org_group (int org_id, int group_id, GError **error);
int
ccnet_rpc_remove_org_group (int org_id, int group_id, GError **error);
int
ccnet_rpc_is_org_group (int group_id, GError **error);
int
ccnet_rpc_get_org_id_by_group (int group_id, GError **error);
GList *
ccnet_rpc_get_org_groups (int org_id, int start, int limit, GError **error);
GList *
ccnet_rpc_get_org_groups_by_user (const char *user, int org_id, GError **error);
GList *
ccnet_rpc_get_org_top_groups (int org_id, GError **error);
int
ccnet_rpc_org_user_exists (int org_id, const char *email, GError **error);
int
ccnet_rpc_is_org_staff (int org_id, const char *email, GError **error);
int
ccnet_rpc_set_org_staff (int org_id, const char *email, GError **error);
int
ccnet_rpc_unset_org_staff (int org_id, const char *email, GError **error);
int
ccnet_rpc_set_org_name (int org_id, const char *org_name, GError **error);
int
ccnet_rpc_set_reference_id (const char *primary_id, const char *reference_id, GError **error);
char *
ccnet_rpc_get_primary_id (const char *email, GError **error);
#endif
================================================
FILE: lib/Makefile.am
================================================
pcfiles = libseafile.pc
pkgconfig_DATA = $(pcfiles)
pkgconfigdir = $(libdir)/pkgconfig
AM_CPPFLAGS = @GLIB2_CFLAGS@ -I$(top_srcdir)/include \
-I$(top_srcdir)/lib \
-I$(top_srcdir)/common \
@SEARPC_CFLAGS@ \
@MSVC_CFLAGS@ \
-Wall
BUILT_SOURCES = gensource
## source file rules
seafile_object_define = repo.vala commit.vala dirent.vala dir.vala \
task.vala branch.vala crypt.vala webaccess.vala seahub.vala copy-task.vala ccnetobj.vala search-result.vala
seafile_object_gen = $(seafile_object_define:.vala=.c)
valac_gen = ${seafile_object_gen} seafile-object.h
EXTRA_DIST = ${seafile_object_define} rpc_table.py $(pcfiles) vala.stamp
utils_headers = net.h bloom-filter.h utils.h db.h job-mgr.h timer.h
utils_srcs = $(utils_headers:.h=.c)
noinst_HEADERS = ${utils_headers} include.h
seafiledir = $(includedir)/seafile
seafile_HEADERS = seafile-object.h
seafile-object.h: ${seafile_object_define}
rm -f $@
valac --pkg posix ${seafile_object_define} -C -H seafile-object.h
DISTCLEANFILES = ${searpc_gen}
## library rules
noinst_LTLIBRARIES = libseafile_common.la
libseafile_common_la_SOURCES = ${seafile_object_gen} ${utils_srcs}
libseafile_common_la_LDFLAGS = -no-undefined
libseafile_common_la_LIBADD = @GLIB2_LIBS@ @GOBJECT_LIBS@ @SSL_LIBS@ -lcrypto @LIB_GDI32@ \
@LIB_UUID@ @LIB_WS32@ @LIB_PSAPI@ -lsqlite3 \
@LIBEVENT_LIBS@ @SEARPC_LIBS@ @LIB_SHELL32@ \
@ZLIB_LIBS@
searpc_gen = searpc-signature.h searpc-marshal.h
gensource: ${searpc_gen} ${valac_gen}
rpc_table.stamp: ${top_srcdir}/lib/rpc_table.py
@rm -f rpc_table.tmp
@touch rpc_table.tmp
@echo "[libsearpc]: generating rpc header files"
@PYTHON@ `which searpc-codegen.py` ${top_srcdir}/lib/rpc_table.py
@echo "[libsearpc]: done"
@mv -f rpc_table.tmp $@
${searpc_gen}: rpc_table.stamp
vala.stamp: ${seafile_object_define}
rm -f ${seafile_object_gen}
@rm -f vala.tmp
@touch vala.tmp
valac -C --pkg posix $^
@mv -f vala.tmp $@
${seafile_object_gen}: vala.stamp
clean-local:
rm -f ${searpc_gen}
rm -f rpc_table.pyc
rm -f rpc_table.stamp
rm -f rpc_table.tmp
rm -f vala.tmp vala.stamp ${valac_gen}
install-data-local:
if MACOS
sed -i '' -e "s|(DESTDIR)|${DESTDIR}|g" $(pcfiles)
else
${SED} -i "s|(DESTDIR)|${DESTDIR}|g" $(pcfiles)
endif
================================================
FILE: lib/bloom-filter.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#include
#include
#include
#include
#include
#include "bloom-filter.h"
#define SETBIT(a, n) (a[n/CHAR_BIT] |= (1<<(n%CHAR_BIT)))
#define CLEARBIT(a, n) (a[n/CHAR_BIT] &= ~(1<<(n%CHAR_BIT)))
#define GETBIT(a, n) (a[n/CHAR_BIT] & (1<<(n%CHAR_BIT)))
Bloom* bloom_create(size_t size, int k, int counting)
{
Bloom *bloom;
size_t csize = 0;
if (k <=0 || k > 4) return NULL;
if ( !(bloom = malloc(sizeof(Bloom))) ) return NULL;
if ( !(bloom->a = calloc((size+CHAR_BIT-1)/CHAR_BIT, sizeof(char))) )
{
free (bloom);
return NULL;
}
if (counting) {
csize = size*4;
bloom->counters = calloc((csize+CHAR_BIT-1)/CHAR_BIT, sizeof(char));
if (!bloom->counters) {
free (bloom);
return NULL;
}
}
bloom->asize = size;
bloom->csize = csize;
bloom->k = k;
bloom->counting = counting;
return bloom;
}
int bloom_destroy(Bloom *bloom)
{
free (bloom->a);
if (bloom->counting) free (bloom->counters);
free (bloom);
return 0;
}
static void
incr_bit (Bloom *bf, unsigned int bit_idx)
{
unsigned int char_idx, offset;
unsigned char value;
unsigned int high;
unsigned int low;
SETBIT (bf->a, bit_idx);
if (!bf->counting) return;
char_idx = bit_idx / 2;
offset = bit_idx % 2;
value = bf->counters[char_idx];
low = value & 0xF;
high = (value & 0xF0) >> 4;
if (offset == 0) {
if (low < 0xF)
low++;
} else {
if (high < 0xF)
high++;
}
value = ((high << 4) | low);
bf->counters[char_idx] = value;
}
static void
decr_bit (Bloom *bf, unsigned int bit_idx)
{
unsigned int char_idx, offset;
unsigned char value;
unsigned int high;
unsigned int low;
if (!bf->counting) {
CLEARBIT (bf->a, bit_idx);
return;
}
char_idx = bit_idx / 2;
offset = bit_idx % 2;
value = bf->counters[char_idx];
low = value & 0xF;
high = (value & 0xF0) >> 4;
/* decrement, but once we have reached the max, never go back! */
if (offset == 0) {
if ((low > 0) && (low < 0xF))
low--;
if (low == 0) {
CLEARBIT (bf->a, bit_idx);
}
} else {
if ((high > 0) && (high < 0xF))
high--;
if (high == 0) {
CLEARBIT (bf->a, bit_idx);
}
}
value = ((high << 4) | low);
bf->counters[char_idx] = value;
}
int bloom_add(Bloom *bloom, const char *s)
{
int i;
SHA256_CTX c;
unsigned char sha256[SHA256_DIGEST_LENGTH];
size_t *sha_int = (size_t *)&sha256;
SHA256_Init(&c);
SHA256_Update(&c, s, strlen(s));
SHA256_Final (sha256, &c);
for (i=0; i < bloom->k; ++i)
incr_bit (bloom, sha_int[i] % bloom->asize);
return 0;
}
int bloom_remove(Bloom *bloom, const char *s)
{
int i;
SHA256_CTX c;
unsigned char sha256[SHA256_DIGEST_LENGTH];
size_t *sha_int = (size_t *)&sha256;
if (!bloom->counting)
return -1;
SHA256_Init(&c);
SHA256_Update(&c, s, strlen(s));
SHA256_Final (sha256, &c);
for (i=0; i < bloom->k; ++i)
decr_bit (bloom, sha_int[i] % bloom->asize);
return 0;
}
int bloom_test(Bloom *bloom, const char *s)
{
int i;
SHA256_CTX c;
unsigned char sha256[SHA256_DIGEST_LENGTH];
size_t *sha_int = (size_t *)&sha256;
SHA256_Init(&c);
SHA256_Update(&c, s, strlen(s));
SHA256_Final (sha256, &c);
for (i=0; i < bloom->k; ++i)
if(!(GETBIT(bloom->a, sha_int[i] % bloom->asize))) return 0;
return 1;
}
================================================
FILE: lib/bloom-filter.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef __BLOOM_H__
#define __BLOOM_H__
#include
typedef struct {
size_t asize;
unsigned char *a;
size_t csize;
unsigned char *counters;
int k;
char counting:1;
} Bloom;
Bloom *bloom_create (size_t size, int k, int counting);
int bloom_destroy (Bloom *bloom);
int bloom_add (Bloom *bloom, const char *s);
int bloom_remove (Bloom *bloom, const char *s);
int bloom_test (Bloom *bloom, const char *s);
#endif
================================================
FILE: lib/branch.vala
================================================
// compile this file with `valac --pkg posix branch.vala -C -H branch.h`
namespace Seafile {
public class Branch : Object {
public string _name;
public string name {
get { return _name; }
set { _name = value; }
}
public string _commit_id;
public string commit_id {
get { return _commit_id; }
set { _commit_id = value; }
}
public string _repo_id;
public string repo_id {
get { return _repo_id; }
set { _repo_id = value; }
}
}
} // namespace
================================================
FILE: lib/ccnetobj.vala
================================================
namespace Ccnet {
public class EmailUser : Object {
public int id { get; set; }
public string email { get; set; }
public bool is_staff { get; set; }
public bool is_active { get; set; }
public int64 ctime { get; set; }
public string source { get; set; }
public string role { get; set; }
public string password { get; set; }
public string reference_id { get; set; }
}
public class Group : Object {
public int id { get; set; }
public string group_name { get; set; }
public string creator_name { get; set; }
public int64 timestamp { get; set; }
public string source { get; set; }
public int parent_group_id { get; set; }
}
public class GroupUser : Object {
public int group_id { get; set; }
public string user_name { get; set; }
public int is_staff { get; set; }
}
public class Organization : Object {
public int org_id { get; set; }
public string email { get; set; }
public int is_staff { get; set; }
public string org_name { get; set; }
public string url_prefix { get; set; }
public string creator { get; set; }
public int64 ctime { get; set; }
}
} // namespace
================================================
FILE: lib/commit.vala
================================================
// compile this file with `valac --pkg posix commit.vala -C -H commit.h`
namespace Seafile {
public class Commit : Object {
// _id is for fast access from c code. id is for
// vala to automatically generate a property. Note,
// if a Vala property is start with _, it is not
// translated into a GObject property.
public char _id[41];
public string id {
get { return (string)_id; }
set { Posix.memcpy(_id, value, 40); _id[40] = '\0'; }
}
public string creator_name { get; set; }
public string _creator; // creator
public string creator {
get { return _creator; }
set { _creator = value; }
}
public string _desc; // description: what does this commit change
public string desc {
get { return _desc; }
set { _desc = value; }
}
public int64 _ctime; // create time
public int64 ctime {
get { return _ctime; }
set { _ctime = value; }
}
public string parent_id { get; set;}
public string second_parent_id { get; set; }
public string _repo_id;
public string repo_id {
get { return _repo_id; }
set { _repo_id = value; }
}
// A commit point to a file or dir, not both.
public string _root_id;
public string root_id {
get { return _root_id; }
set { _root_id = value; }
}
// Repo data-format version of this commit
public int version { get; set; }
public bool new_merge { get; set; }
public bool conflict { get; set; }
// Used for returning file revision
public string rev_file_id { get; set; }
public int64 rev_file_size { get; set; }
// Set if this commit renames a revision of a file
public string rev_renamed_old_path { get; set; }
public string device_name { get; set; }
public string client_version { get; set; }
//Only used for file history pagination
public string next_start_commit { get; set; }
}
} // namespace
================================================
FILE: lib/copy-task.vala
================================================
namespace Seafile {
public class CopyTask : Object {
public int64 done { set; get; }
public int64 total { set; get; }
public bool canceled { set; get; }
public bool failed { set; get; }
public string failed_reason { set; get; }
public bool successful { set; get; }
}
public class CopyResult : Object {
public bool background { set; get; }
public string task_id { set; get; }
}
}
================================================
FILE: lib/crypt.vala
================================================
namespace Seafile {
public class CryptKey : Object {
public string key { set; get; }
public string iv { set; get; }
}
}
================================================
FILE: lib/db.c
================================================
#include
#include
#include "db.h"
int
sqlite_open_db (const char *db_path, sqlite3 **db)
{
int result;
const char *errmsg;
result = sqlite3_open (db_path, db);
if (result) {
errmsg = sqlite3_errmsg (*db);
g_warning ("Couldn't open database:'%s', %s\n",
db_path, errmsg ? errmsg : "no error given");
sqlite3_close (*db);
return -1;
}
return 0;
}
int sqlite_close_db (sqlite3 *db)
{
return sqlite3_close (db);
}
sqlite3_stmt *
sqlite_query_prepare (sqlite3 *db, const char *sql)
{
sqlite3_stmt *stmt;
int result;
result = sqlite3_prepare_v2 (db, sql, -1, &stmt, NULL);
if (result != SQLITE_OK) {
const gchar *str = sqlite3_errmsg (db);
g_warning ("Couldn't prepare query, error:%d->'%s'\n\t%s\n",
result, str ? str : "no error given", sql);
return NULL;
}
return stmt;
}
int
sqlite_query_exec (sqlite3 *db, const char *sql)
{
char *errmsg = NULL;
int result;
result = sqlite3_exec (db, sql, NULL, NULL, &errmsg);
if (result != SQLITE_OK) {
if (errmsg != NULL) {
g_warning ("SQL error: %d - %s\n:\t%s\n", result, errmsg, sql);
sqlite3_free (errmsg);
}
return -1;
}
return 0;
}
int
sqlite_begin_transaction (sqlite3 *db)
{
char *sql = "BEGIN TRANSACTION;";
return sqlite_query_exec (db, sql);
}
int
sqlite_end_transaction (sqlite3 *db)
{
char *sql = "END TRANSACTION;";
return sqlite_query_exec (db, sql);
}
gboolean
sqlite_check_for_existence (sqlite3 *db, const char *sql)
{
sqlite3_stmt *stmt;
int result;
stmt = sqlite_query_prepare (db, sql);
if (!stmt)
return FALSE;
result = sqlite3_step (stmt);
if (result == SQLITE_ERROR) {
const gchar *str = sqlite3_errmsg (db);
g_warning ("Couldn't execute query, error: %d->'%s'\n",
result, str ? str : "no error given");
sqlite3_finalize (stmt);
return FALSE;
}
sqlite3_finalize (stmt);
if (result == SQLITE_ROW)
return TRUE;
return FALSE;
}
int
sqlite_foreach_selected_row (sqlite3 *db, const char *sql,
SqliteRowFunc callback, void *data)
{
sqlite3_stmt *stmt;
int result;
int n_rows = 0;
stmt = sqlite_query_prepare (db, sql);
if (!stmt) {
return -1;
}
while (1) {
result = sqlite3_step (stmt);
if (result != SQLITE_ROW)
break;
n_rows++;
if (!callback (stmt, data))
break;
}
if (result == SQLITE_ERROR) {
const gchar *s = sqlite3_errmsg (db);
g_warning ("Couldn't execute query, error: %d->'%s'\n",
result, s ? s : "no error given");
sqlite3_finalize (stmt);
return -1;
}
sqlite3_finalize (stmt);
return n_rows;
}
int sqlite_get_int (sqlite3 *db, const char *sql)
{
int ret = -1;
int result;
sqlite3_stmt *stmt;
if ( !(stmt = sqlite_query_prepare(db, sql)) )
return 0;
result = sqlite3_step (stmt);
if (result == SQLITE_ROW) {
ret = sqlite3_column_int (stmt, 0);
sqlite3_finalize (stmt);
return ret;
}
if (result == SQLITE_ERROR) {
const gchar *str = sqlite3_errmsg (db);
g_warning ("Couldn't execute query, error: %d->'%s'\n",
result, str ? str : "no error given");
sqlite3_finalize (stmt);
return -1;
}
sqlite3_finalize(stmt);
return ret;
}
gint64 sqlite_get_int64 (sqlite3 *db, const char *sql)
{
gint64 ret = -1;
int result;
sqlite3_stmt *stmt;
if ( !(stmt = sqlite_query_prepare(db, sql)) )
return 0;
result = sqlite3_step (stmt);
if (result == SQLITE_ROW) {
ret = sqlite3_column_int64 (stmt, 0);
sqlite3_finalize (stmt);
return ret;
}
if (result == SQLITE_ERROR) {
const gchar *str = sqlite3_errmsg (db);
g_warning ("Couldn't execute query, error: %d->'%s'\n",
result, str ? str : "no error given");
sqlite3_finalize (stmt);
return -1;
}
sqlite3_finalize(stmt);
return ret;
}
char *sqlite_get_string (sqlite3 *db, const char *sql)
{
const char *res = NULL;
int result;
sqlite3_stmt *stmt;
char *ret;
if ( !(stmt = sqlite_query_prepare(db, sql)) )
return NULL;
result = sqlite3_step (stmt);
if (result == SQLITE_ROW) {
res = (const char *)sqlite3_column_text (stmt, 0);
ret = g_strdup(res);
sqlite3_finalize (stmt);
return ret;
}
if (result == SQLITE_ERROR) {
const gchar *str = sqlite3_errmsg (db);
g_warning ("Couldn't execute query, error: %d->'%s'\n",
result, str ? str : "no error given");
sqlite3_finalize (stmt);
return NULL;
}
sqlite3_finalize(stmt);
return NULL;
}
================================================
FILE: lib/db.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef DB_UTILS_H
#define DB_UTILS_H
#include
int sqlite_open_db (const char *db_path, sqlite3 **db);
int sqlite_close_db (sqlite3 *db);
sqlite3_stmt *sqlite_query_prepare (sqlite3 *db, const char *sql);
int sqlite_query_exec (sqlite3 *db, const char *sql);
int sqlite_begin_transaction (sqlite3 *db);
int sqlite_end_transaction (sqlite3 *db);
gboolean sqlite_check_for_existence (sqlite3 *db, const char *sql);
typedef gboolean (*SqliteRowFunc) (sqlite3_stmt *stmt, void *data);
int
sqlite_foreach_selected_row (sqlite3 *db, const char *sql,
SqliteRowFunc callback, void *data);
int sqlite_get_int (sqlite3 *db, const char *sql);
gint64 sqlite_get_int64 (sqlite3 *db, const char *sql);
char *sqlite_get_string (sqlite3 *db, const char *sql);
#endif
================================================
FILE: lib/dir.vala
================================================
namespace Seafile {
// GObject wrapper describing a seafile directory object.
public class Dir : Object {
// _id is for fast access from c code. id is for
// vala to automatically generate a property. Note,
// if a Vala property is start with _, it is not
// translated into a GObject property.
public char _id[41];
// 40-char hex object id, kept NUL-terminated in _id.
public string id {
get { return (string)_id; }
set { Posix.memcpy(_id, value, 40); _id[40] = '\0'; }
}
// Entries of this directory; element type is set from the C side.
public List entries;
// Data format version of the directory object.
public int version { set; get; }
}
// Aggregate counters for a directory tree (file-count APIs).
public class FileCountInfo : Object {
public int64 file_count { set; get; }
public int64 dir_count { set; get; }
public int64 size { set; get; }
}
} // namespace
================================================
FILE: lib/dirent.vala
================================================
namespace Seafile {
// GObject describing one entry of a seafile directory (file or subdir),
// as returned by the list-dir RPCs.
// (NOTE(review): the original comment here described an _id field that
// this class does not have -- it was copied from dir.vala.)
public class Dirent : Object {
public string obj_id { set; get; }
public string obj_name { set; get; }
// POSIX-style mode bits: distinguishes file vs directory.
public int mode { set; get; }
public int version { set; get; }
public int64 mtime { set; get; }
public int64 size { set; get; }
public string modifier { set; get;}
public string permission { set; get; }
// File-locking state (server-side locks).
public bool is_locked { set; get; }
public string lock_owner { set; get; }
public int64 lock_time { set; get; }
public bool is_shared { set; get; }
}
// Name + timestamp pair used by the "last modified" queries.
public class FileLastModifiedInfo : Object {
public string file_name { set; get; }
public int64 last_modified { set; get; }
}
} // namespace
================================================
FILE: lib/file.vala
================================================
namespace Seafile {
// GObject wrapper for a seafile file object, identified by a
// 40-character hex id.
public class File : Object {
    // _id is for fast access from c code. id is for
    // vala to automatically generate a property. Note,
    // if a Vala property is start with _, it is not
    // translated into a GObject property.
    public char _id[41];
    public string id {
        get { return (string)_id; }
        // BUGFIX: copy from the incoming 'value', not from the 'id'
        // property itself -- the old code read the destination back
        // (cf. the correct setter in dir.vala / repo.vala).
        set { Posix.memcpy(_id, value, 40); _id[40] = '\0'; }
    }
    public uint64 size;
}
} // namespace
================================================
FILE: lib/include.h
================================================
#include
#include
#include
#include
#include
#include
#include
#include "utils.h"
/* Logging shims: map ccnet_* logging onto GLib's g_warning/g_error/
 * g_message/g_debug, prefixing warnings/errors with the caller's
 * function name.  Each is guarded so a project can pre-define its own. */
#ifndef ccnet_warning
#define ccnet_warning(fmt, ...) g_warning( "%s: " fmt, __func__ , ##__VA_ARGS__)
#endif
#ifndef ccnet_error
#define ccnet_error(fmt, ...) g_error( "%s: " fmt, __func__ , ##__VA_ARGS__)
#endif
#ifndef ccnet_message
#define ccnet_message(fmt, ...) g_message(fmt, ##__VA_ARGS__)
#endif
#ifndef ccnet_debug
#define ccnet_debug(fmt, ...) g_debug(fmt, ##__VA_ARGS__)
#endif
/* In non-debug builds, compile g_debug() calls out entirely. */
#ifndef ENABLE_DEBUG
#undef g_debug
#define g_debug(...)
#endif
================================================
FILE: lib/job-mgr.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
#include
#include
#else
#include
#endif
#include
#include
#include
#include
#define MAX_THREADS 50
#define MAX_IDLE_THREADS 10
#include "utils.h"
#include "job-mgr.h"
struct _CcnetJob {
CcnetJobManager *manager;
int id;
ccnet_pipe_t pipefd[2];
JobThreadFunc thread_func;
JobDoneCallback done_func; /* called when the thread is done */
void *data;
/* the done callback should only access this field */
void *result;
};
void
ccnet_job_manager_remove_job (CcnetJobManager *mgr, int job_id);
static void
job_thread_wrapper (void *vdata, void *unused)
{
CcnetJob *job = vdata;
job->result = job->thread_func (job->data);
if (pipewriten (job->pipefd[1], "a", 1) != 1) {
g_warning ("[Job Manager] write to pipe error: %s\n", strerror(errno));
}
}
/* Event callback fired (on the event loop) when a job's worker thread
 * has finished, signalled by the byte written in job_thread_wrapper().
 * Drains the byte, closes both pipe ends, invokes the done callback
 * with the thread's result, and removes (frees) the job. */
static void
job_done_cb (evutil_socket_t fd, short event, void *vdata)
{
CcnetJob *job = vdata;
char buf[1];
/* Consume the wake-up byte written by the worker thread. */
if (pipereadn (job->pipefd[0], buf, 1) != 1) {
g_warning ("[Job Manager] read pipe error: %s\n", strerror(errno));
}
pipeclose (job->pipefd[0]);
pipeclose (job->pipefd[1]);
if (job->done_func) {
/* done_func runs in this (event-loop) context, not in the worker. */
job->done_func (job->result);
}
/* Removing from the table frees the job via its destroy notify. */
ccnet_job_manager_remove_job (job->manager, job->id);
}
/* Start a job: create its notification pipe, hand it to the manager's
 * thread pool, and register a one-shot libevent callback on the pipe's
 * read end so job_done_cb() runs when the worker finishes.
 * Returns 0 on success, -1 if the pipe cannot be created. */
int
job_thread_create (CcnetJob *job)
{
if (ccnet_pipe (job->pipefd) < 0) {
g_warning ("pipe error: %s\n", strerror(errno));
return -1;
}
g_thread_pool_push (job->manager->thread_pool, job, NULL);
#ifndef UNIT_TEST
/* Unit-test builds have no event loop; ccnet_job_manager_wait_job()
 * calls job_done_cb() by hand instead. */
event_once (job->pipefd[0], EV_READ, job_done_cb, job, NULL);
#endif
return 0;
}
CcnetJob *
ccnet_job_new ()
{
    /* Allocate a zero-initialized job object; the caller fills in the
     * callbacks, id and manager before scheduling it. */
    return g_new0 (CcnetJob, 1);
}
/* Release a job object.  Jobs own no heap members besides the struct
 * itself; the pipe fds are closed in job_done_cb(). */
void
ccnet_job_free (CcnetJob *job)
{
g_free (job);
}
CcnetJobManager *
ccnet_job_manager_new (int max_threads)
{
    /* Create a job manager backed by a GLib thread pool of at most
     * `max_threads` workers.  The job table maps job id -> CcnetJob and
     * owns its values (they are freed by ccnet_job_free on removal). */
    CcnetJobManager *manager = g_new0 (CcnetJobManager, 1);

    manager->jobs = g_hash_table_new_full (g_direct_hash, g_direct_equal,
                                           NULL,
                                           (GDestroyNotify)ccnet_job_free);
    manager->thread_pool = g_thread_pool_new (job_thread_wrapper,
                                              NULL,
                                              max_threads,
                                              FALSE,
                                              NULL);
    return manager;
}
/* Destroy the manager.  Destroying the hash table frees all remaining
 * jobs via the ccnet_job_free destroy notify.  The pool is freed with
 * immediate=TRUE, wait_=FALSE: per the g_thread_pool_free() contract,
 * queued-but-unstarted jobs are discarded and running threads are not
 * waited for. */
void
ccnet_job_manager_free (CcnetJobManager *mgr)
{
g_hash_table_destroy (mgr->jobs);
g_thread_pool_free (mgr->thread_pool, TRUE, FALSE);
g_free (mgr);
}
/* Create a job that runs func(data) on a pool thread and, when done,
 * calls done_func(result) from the event loop.  Returns the new job's
 * id, or -1 if the job could not be started (the failed job is then
 * freed by the hash table's destroy notify).
 * NOTE(review): next_job_id is incremented without locking, so this
 * appears intended to be called from a single thread -- confirm before
 * calling concurrently. */
int
ccnet_job_manager_schedule_job (CcnetJobManager *mgr,
JobThreadFunc func,
JobDoneCallback done_func,
void *data)
{
CcnetJob *job = ccnet_job_new ();
job->id = mgr->next_job_id++;
job->manager = mgr;
job->thread_func = func;
job->done_func = done_func;
job->data = data;
g_hash_table_insert (mgr->jobs, (gpointer)(long)job->id, job);
if (job_thread_create (job) < 0) {
/* Removal frees the job via the table's destroy notify. */
g_hash_table_remove (mgr->jobs, (gpointer)(long)job->id);
return -1;
}
return job->id;
}
/* Drop a job from the manager's table; the table's destroy notify
 * (ccnet_job_free) frees the job object. */
void
ccnet_job_manager_remove_job (CcnetJobManager *mgr, int job_id)
{
g_hash_table_remove (mgr->jobs, (gpointer)(long)job_id);
}
#ifdef UNIT_TEST
/* Unit-test helper: synchronously wait for job `job_id` by invoking
 * job_done_cb() directly (pipereadn inside it blocks until the worker
 * writes its completion byte).  Only built under UNIT_TEST, where no
 * event loop dispatches the callback. */
void
ccnet_job_manager_wait_job (CcnetJobManager *mgr, int job_id)
{
    CcnetJob *job;

    job = g_hash_table_lookup (mgr->jobs, (gpointer)(long)job_id);
    /* BUGFIX: an unknown/already-finished id used to crash inside
     * job_done_cb() on a NULL job. */
    if (!job) {
        g_warning ("[Job Manager] no such job %d\n", job_id);
        return;
    }
    /* manually call job_done_cb */
    job_done_cb (0, 0, (void *)job);
}
#endif
================================================
FILE: lib/job-mgr.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/**
 * Job Manager manages long term jobs. These jobs are run in their
 * own threads.
 */
#ifndef JOB_MGR_H
#define JOB_MGR_H
/* NOTE(review): the include target below was lost in extraction;
 * restore it from the original header before building. */
#include
struct _CcnetSession;
typedef struct _CcnetJob CcnetJob;
typedef struct _CcnetJobManager CcnetJobManager;
/*
The thread func should return the result back by
return (void *)result;
The result will be passed to JobDoneCallback.
*/
typedef void* (*JobThreadFunc)(void *data);
typedef void (*JobDoneCallback)(void *result);
struct _CcnetJobManager {
/* job id -> CcnetJob; owns its values. */
GHashTable *jobs;
GThreadPool *thread_pool;
/* Next id handed out by schedule_job (monotonically increasing). */
int next_job_id;
};
void
ccnet_job_cancel (CcnetJob *job);
/* Create a manager whose pool runs at most max_threads workers. */
CcnetJobManager *
ccnet_job_manager_new (int max_threads);
void
ccnet_job_manager_free (CcnetJobManager *mgr);
/* Schedule func(data) on a worker; done_func(result) runs on the event
 * loop when it finishes.  Returns the job id, or -1 on failure. */
int
ccnet_job_manager_schedule_job (CcnetJobManager *mgr,
JobThreadFunc func,
JobDoneCallback done_func,
void *data);
/**
 * Wait a specific job to be done.
 */
void
ccnet_job_manager_wait_job (CcnetJobManager *mgr, int job_id);
#endif
================================================
FILE: lib/libseafile.pc.in
================================================
prefix=(DESTDIR)@prefix@
exec_prefix=@exec_prefix@
libdir=@libdir@
includedir=@includedir@
Name: libseafile
Description: Client library for accessing seafile service.
Version: @VERSION@
Libs: -L${libdir} -lseafile @SEARPC_LIBS@
Cflags: -I${includedir} @SEARPC_CFLAGS@
Requires: gobject-2.0 glib-2.0
================================================
FILE: lib/net.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifdef WIN32
#define WINVER 0x0501
#include
#include
#include
#include
#endif
#include "include.h"
#include
#include
#include
#include
#include
#ifdef WIN32
#define UNUSED
#else
#include
#include
#include
#include
#include
#include
#include
#include
#include
#endif
#include
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
#include
#else
#include
#endif
#include "net.h"
#ifdef WIN32
#ifndef inet_aton
int inet_aton(const char *string, struct in_addr *addr)
{
addr->s_addr = inet_addr(string);
if (addr->s_addr != -1 || strcmp("255.255.255.255", string) == 0)
return 1;
return 0;
}
#endif
#endif //WIN32
/* Set the IP type-of-service byte on socket s.  On platforms without
 * IP_TOS this is a successful no-op (returns 0). */
int
ccnet_netSetTOS (evutil_socket_t s, int tos)
{
#ifdef IP_TOS
return setsockopt( s, IPPROTO_IP, IP_TOS, (char*)&tos, sizeof( tos ) );
#else
return 0;
#endif
}
static evutil_socket_t
makeSocketNonBlocking (evutil_socket_t fd)
{
    /* Switch fd to non-blocking mode.  On failure the socket is closed
     * and -1 is returned; on success (or when fd is already invalid)
     * the fd is returned unchanged. */
    if (fd < 0)
        return fd;

    if (evutil_make_socket_nonblocking(fd)) {
        ccnet_warning ("Couldn't make socket nonblock: %s",
                       evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()));
        evutil_closesocket(fd);
        return -1;
    }

    return fd;
}
static evutil_socket_t
createSocket (int family, int nonblock)
{
    /* Create a TCP socket of the given family; optionally make it
     * non-blocking and disable Nagle.  Returns the socket or a
     * negative value on failure. */
    evutil_socket_t fd;
    int ret;

    fd = socket (family, SOCK_STREAM, 0);
    if (fd < 0) {
        ccnet_warning("create Socket failed %d\n", fd);
        return fd;
    }

    if (nonblock) {
        int nodelay = 1;

        fd = makeSocketNonBlocking( fd );
        /* BUGFIX: if the conversion failed, the old code went on to
         * call setsockopt()/close() on fd == -1. makeSocketNonBlocking
         * has already closed the socket in that case. */
        if (fd < 0)
            return -1;

        ret = setsockopt (fd, IPPROTO_TCP, TCP_NODELAY,
                          (char *)&nodelay, sizeof(nodelay));
        if (ret < 0) {
            ccnet_warning("setsockopt failed\n");
            evutil_closesocket(fd);
            return -1;
        }
    }

    return fd;
}
evutil_socket_t
ccnet_net_open_tcp (const struct sockaddr *sa, int nonblock)
{
    /* Open a TCP connection to `sa` (optionally non-blocking).
     * Returns the connected -- or, when non-blocking, still-connecting --
     * socket, or -1 on error. */
    evutil_socket_t s;
    int sa_len;

    if( (s = createSocket(sa->sa_family, nonblock)) < 0 )
        return -1;

#ifndef WIN32
    if (sa->sa_family == AF_INET)
        sa_len = sizeof (struct sockaddr_in);
    else
        sa_len = sizeof (struct sockaddr_in6);
#else
    if (sa->sa_family == AF_INET)
        sa_len = sizeof (struct sockaddr_in);
    else {
        /* BUGFIX: the freshly created socket was leaked here for
         * non-IPv4 families on Windows. */
        evutil_closesocket(s);
        return -1;
    }
#endif

    /* A non-blocking connect legitimately reports EINPROGRESS
     * (WSAEWOULDBLOCK on Windows); anything else is a real failure. */
    if( (connect(s, sa, sa_len) < 0)
#ifdef WIN32
        && (sockerrno != WSAEWOULDBLOCK)
#endif
        && (sockerrno != EINPROGRESS) )
    {
        evutil_closesocket(s);
        s = -1;
    }

    return s;
}
evutil_socket_t
ccnet_net_bind_tcp (int port, int nonblock)
{
    /* Create a TCP socket bound to `port` (not yet listening).
     * On POSIX, iterates over getaddrinfo() results until one binds;
     * on Windows, binds an IPv4 wildcard socket.
     * Returns the bound socket or -1 on error. */
#ifndef WIN32
    int sockfd, n;
    struct addrinfo hints, *res, *ressave;
    char buf[10];

    memset (&hints, 0, sizeof (struct addrinfo));
    hints.ai_flags = AI_PASSIVE;
    hints.ai_family = AF_UNSPEC;
    hints.ai_socktype = SOCK_STREAM;

    snprintf (buf, sizeof(buf), "%d", port);

    if ( (n = getaddrinfo(NULL, buf, &hints, &res) ) != 0) {
        ccnet_warning ("getaddrinfo fails: %s\n", gai_strerror(n));
        return -1;
    }
    ressave = res;

    do {
        int on = 1;

        sockfd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
        if (sockfd < 0)
            continue;       /* error - try next one */

        if (setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) < 0) {
            ccnet_warning ("setsockopt of SO_REUSEADDR error\n");
            /* BUGFIX: the fd was leaked here before trying the next
             * address. */
            close(sockfd);
            continue;
        }

        if (nonblock)
            sockfd = makeSocketNonBlocking (sockfd);
        if (sockfd < 0)
            continue;       /* makeSocketNonBlocking() already closed it */

        if (bind(sockfd, res->ai_addr, res->ai_addrlen) == 0)
            break;          /* success */

        close(sockfd);      /* bind error - close and try next one */
    } while ( (res = res->ai_next) != NULL);

    freeaddrinfo (ressave);

    if (res == NULL) {
        ccnet_warning ("bind fails: %s\n", strerror(errno));
        return -1;
    }

    return sockfd;
#else
    evutil_socket_t s;
    struct sockaddr_in sock;
    const int type = AF_INET;
#if defined( SO_REUSEADDR ) || defined( SO_REUSEPORT )
    int optval;
#endif

    if ((s = createSocket(type, nonblock)) < 0)
        return -1;

    optval = 1;
    setsockopt (s, SOL_SOCKET, SO_REUSEADDR, (char*)&optval, sizeof(optval));

    memset(&sock, 0, sizeof(sock));
    sock.sin_family = AF_INET;
    sock.sin_addr.s_addr = INADDR_ANY;
    sock.sin_port = htons(port);

    if ( bind(s, (struct sockaddr *)&sock, sizeof(struct sockaddr_in)) < 0)
    {
        ccnet_warning ("bind fails: %s\n", strerror(errno));
        evutil_closesocket (s);
        return -1;
    }
    if (nonblock)
        s = makeSocketNonBlocking (s);
    return s;
#endif
}
/* Put fd back into blocking mode.  Returns 0 on success, -1 on error.
 * NOTE(review): the Windows warning text says "F_GETFL" although the
 * call is ioctlsocket(FIONBIO) -- the message is stale, behavior is
 * unchanged here. */
int
ccnet_net_make_socket_blocking(evutil_socket_t fd)
{
#ifdef WIN32
{
u_long nonblocking = 0;
if (ioctlsocket(fd, FIONBIO, &nonblocking) == SOCKET_ERROR) {
ccnet_warning ("fcntl(%d, F_GETFL)", (int)fd);
return -1;
}
}
#else
{
int flags;
/* Read current flags, then clear O_NONBLOCK. */
if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) {
ccnet_warning ("fcntl(%d, F_GETFL)", fd);
return -1;
}
if (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) == -1) {
ccnet_warning ("fcntl(%d, F_SETFL)", fd);
return -1;
}
}
#endif
return 0;
}
evutil_socket_t
ccnet_net_accept (evutil_socket_t b, struct sockaddr_storage *cliaddr,
socklen_t *len, int nonblock)
{
    /* Accept one pending connection on listening socket b, filling in
     * the peer address; optionally switch the new socket to
     * non-blocking mode.  Returns accept()'s result unchanged. */
    evutil_socket_t conn = accept (b, (struct sockaddr *)cliaddr, len);

    if (nonblock)
        makeSocketNonBlocking(conn);

    return conn;
}
evutil_socket_t
ccnet_net_bind_v4 (const char *ipaddr, int *port)
{
    /* Bind a fresh IPv4 TCP socket to `ipaddr`.  If *port is 0 an
     * ephemeral port is chosen by the kernel and written back through
     * *port.  Returns the bound socket, or -1 on error (still exits
     * the process when socket() itself fails, as before). */
    evutil_socket_t sockfd;
    struct sockaddr_in addr;
    int on = 1;

    sockfd = socket (AF_INET, SOCK_STREAM, 0);
    if (sockfd < 0) {
        ccnet_warning("create socket failed: %s\n", strerror(errno));
        exit(-1);
    }

    memset (&addr, 0, sizeof (struct sockaddr_in));
    addr.sin_family = AF_INET;
    if (inet_aton(ipaddr, &addr.sin_addr) == 0) {
        ccnet_warning ("Bad ip address %s\n", ipaddr);
        /* BUGFIX: the fd was leaked on every error path below. */
        evutil_closesocket (sockfd);
        return -1;
    }
    addr.sin_port = htons (*port);

    if (setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, (char *)&on, sizeof(on)) < 0)
    {
        ccnet_warning ("setsockopt of SO_REUSEADDR error: %s\n",
                       strerror(errno));
        evutil_closesocket (sockfd);
        return -1;
    }

    if ( bind(sockfd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
        ccnet_warning ("Bind error: %s\n", strerror (errno));
        evutil_closesocket (sockfd);
        return -1;
    }

    if (*port == 0) {
        /* Ask the kernel which ephemeral port it picked. */
        struct sockaddr_storage ss;
        socklen_t len = sizeof(ss);

        if (getsockname(sockfd, (struct sockaddr *)&ss, &len) < 0) {
            ccnet_warning ("getsockname error: %s\n", strerror(errno));
            evutil_closesocket (sockfd);
            return -1;
        }
        *port = sock_port ((struct sockaddr *)&ss);
    }

    return sockfd;
}
/* Format a socket address (IPv4, IPv6 where enabled, or Unix-domain)
 * as a printable string.
 * WARNING: returns a pointer to a static buffer -- the result is
 * overwritten by the next call; not thread-safe, not reentrant. */
char *
sock_ntop(const struct sockaddr *sa, socklen_t salen)
{
static char str[128]; /* Unix domain is largest */
switch (sa->sa_family) {
case AF_INET: {
struct sockaddr_in *sin = (struct sockaddr_in *) sa;
if (evutil_inet_ntop(AF_INET, &sin->sin_addr, str, sizeof(str)) == NULL)
return(NULL);
return(str);
}
#ifdef IPv6
case AF_INET6: {
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) sa;
if (evutil_inet_ntop(AF_INET6, &sin6->sin6_addr, str, sizeof(str) - 1) == NULL)
return(NULL);
return (str);
}
#endif
#ifndef WIN32
#ifdef AF_UNIX
case AF_UNIX: {
struct sockaddr_un *unp = (struct sockaddr_un *) sa;
/* OK to have no pathname bound to the socket: happens on
every connect() unless client calls bind() first. */
if (unp->sun_path[0] == 0)
strcpy(str, "(no pathname bound)");
else
snprintf(str, sizeof(str), "%s", unp->sun_path);
return(str);
}
#endif
#endif
default:
/* Unknown family: return a diagnostic string rather than NULL. */
snprintf(str, sizeof(str), "sock_ntop: unknown AF_xxx: %d, len %d",
sa->sa_family, salen);
return(str);
}
return (NULL);
}
/* Parse addr_str into *sa (trying IPv4 first, then IPv6 on non-Windows
 * builds), filling in the address family and the given port.
 * Returns 0 on success, -1 if the string is not a valid address. */
int
sock_pton (const char *addr_str, uint16_t port, struct sockaddr_storage *sa)
{
struct sockaddr_in *saddr = (struct sockaddr_in *) sa;
#ifndef WIN32
struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *) sa;
#endif
if (evutil_inet_pton (AF_INET, addr_str, &saddr->sin_addr) == 1 ) {
saddr->sin_family = AF_INET;
saddr->sin_port = htons (port);
return 0;
}
#ifndef WIN32
else if (evutil_inet_pton (AF_INET6, addr_str, &saddr6->sin6_addr) == 1)
{
saddr6->sin6_family = AF_INET6;
saddr6->sin6_port = htons (port);
return 0;
}
#endif
return -1;
}
/* return 1 if addr_str is a valid ipv4 or ipv6 address */
int
is_valid_ipaddr (const char *addr_str)
{
struct sockaddr_storage addr;
if (!addr_str)
return 0;
if (sock_pton(addr_str, 0, &addr) < 0)
return 0;
return 1;
}
uint16_t
sock_port (const struct sockaddr *sa)
{
    /* Extract the port number (in host byte order) from an IPv4 -- or,
     * where IPv6 support is compiled in, IPv6 -- socket address.
     * Unknown families yield 0. */
    if (sa->sa_family == AF_INET) {
        const struct sockaddr_in *v4 = (const struct sockaddr_in *) sa;
        return ntohs(v4->sin_port);
    }
#ifdef IPv6
    if (sa->sa_family == AF_INET6) {
        const struct sockaddr_in6 *v6 = (const struct sockaddr_in6 *) sa;
        return ntohs(v6->sin6_port);
    }
#endif
    return 0;
}
evutil_socket_t
udp_client (const char *host, const char *serv,
            struct sockaddr **saptr, socklen_t *lenp)
{
    /* Create an unconnected UDP socket suitable for talking to
     * host/serv.  On success the resolved destination address is
     * returned through *saptr (heap-allocated -- caller frees) and its
     * length through *lenp.  Returns the socket, or -1 on error. */
    evutil_socket_t sockfd;
    int n;
    struct addrinfo hints, *res, *ressave;

    memset (&hints, 0, sizeof(struct addrinfo));
    hints.ai_family = AF_UNSPEC;
    hints.ai_socktype = SOCK_DGRAM;

    if ((n = getaddrinfo(host, serv, &hints, &res)) != 0) {
        ccnet_warning ("udp_client error for %s, %s: %s",
                       host, serv, gai_strerror(n));
        return -1;
    }
    ressave = res;

    do {
        sockfd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
        if (sockfd >= 0)
            break;          /* success */
    } while ( (res = res->ai_next) != NULL);

    if (res == NULL) {      /* errno set from final socket() */
        ccnet_warning ("udp_client error for %s, %s", host, serv);
        freeaddrinfo (ressave);
        return -1;
    }

    *saptr = malloc(res->ai_addrlen);
    if (*saptr == NULL) {
        /* BUGFIX: the malloc result was dereferenced unchecked. */
        ccnet_warning ("udp_client: out of memory\n");
        evutil_closesocket (sockfd);
        freeaddrinfo (ressave);
        return -1;
    }
    memcpy(*saptr, res->ai_addr, res->ai_addrlen);
    *lenp = res->ai_addrlen;

    freeaddrinfo(ressave);

    return (sockfd);
}
int
family_to_level(int family)
{
    /* Map an address family to the socket-option level used for its
     * multicast options; -1 for unsupported families. */
    if (family == AF_INET)
        return IPPROTO_IP;
#ifdef IPV6
    if (family == AF_INET6)
        return IPPROTO_IPV6;
#endif
    return -1;
}
#ifdef WIN32
/* Windows multicast join: set TTL to 3, disable multicast loopback,
 * then join the group via WSAJoinLeaf().  ifname/ifindex are ignored
 * on this platform.  Returns the leaf socket handle returned by
 * WSAJoinLeaf() on success, -1 on failure. */
static int
mcast_join(evutil_socket_t sockfd, const struct sockaddr *grp, socklen_t grplen,
const char *ifname, u_int ifindex)
{
int optval = 3;
int sockm;
if (setsockopt(sockfd, IPPROTO_IP, IP_MULTICAST_TTL,
(char *)&optval, sizeof(int)) == SOCKET_ERROR) {
ccnet_warning("Fail to set socket multicast TTL, LastError=%d\n",
WSAGetLastError());
return -1;
}
optval = 0;
if (setsockopt(sockfd, IPPROTO_IP, IP_MULTICAST_LOOP,
(char *)&optval, sizeof(int)) == SOCKET_ERROR) {
ccnet_warning("Fail to set socket multicast LOOP, LastError=%d\n",
WSAGetLastError());
return -1;
}
sockm = WSAJoinLeaf (sockfd, grp, grplen, NULL, NULL, NULL, NULL, JL_BOTH);
if (sockm == INVALID_SOCKET) {
ccnet_warning("Fail to join multicast group, LastError=%d\n",
WSAGetLastError());
return -1;
}
return sockm;
}
evutil_socket_t
create_multicast_sock (struct sockaddr *sasend, socklen_t salen)
{
    /* Windows variant: create a multipoint-capable UDP socket, bind it
     * to the group's port on INADDR_ANY, and join the group in
     * `sasend`.  Returns the socket or -1. */
    int ret;
    const int on = 1;
    evutil_socket_t recvfd;
    struct sockaddr *sarecv;

    recvfd = WSASocket (AF_INET, SOCK_DGRAM, 0, NULL, 0,
                        WSA_FLAG_MULTIPOINT_C_LEAF|WSA_FLAG_MULTIPOINT_D_LEAF
                        |WSA_FLAG_OVERLAPPED);
    if (recvfd < 0) {
        ccnet_warning ("Create multicast listen socket fails: %d\n",
                       WSAGetLastError());
        return -1;
    }

    ret = setsockopt(recvfd, SOL_SOCKET, SO_REUSEADDR, (char *)&on, sizeof(on));
    if (ret != 0) {
        ccnet_warning("Failed to setsockopt SO_REUSEADDR, WSAGetLastError=%d\n",
                      WSAGetLastError());
        /* BUGFIX: the socket was leaked on this and later error paths. */
        evutil_closesocket (recvfd);
        return -1;
    }

    /* Bind a private copy of the group address with the wildcard IP. */
    sarecv = malloc(salen);
    if (sarecv == NULL) {
        /* BUGFIX: malloc was unchecked. */
        evutil_closesocket (recvfd);
        return -1;
    }
    memcpy(sarecv, sasend, salen);
    struct sockaddr_in *saddr = (struct sockaddr_in *)sarecv;
    saddr->sin_addr.s_addr = INADDR_ANY;

    if (bind(recvfd, sarecv, salen) < 0) {
        ccnet_warning("Bind multicast bind socket failed LastError=%d\n",
                      WSAGetLastError());
        free (sarecv);
        evutil_closesocket (recvfd);
        return -1;
    }
    free (sarecv);

    if (mcast_join(recvfd, sasend, salen, NULL, 0) < 0) {
        ccnet_warning ("mcast_join error: %s\n", strerror(errno));
        evutil_closesocket (recvfd);
        return -1;
    }

    return recvfd;
}
#else
/* Join the multicast group `grp` on `sockfd`, selecting the interface
 * by `ifindex` (preferred) or `ifname`, or the default interface when
 * both are unset.  Uses the protocol-independent MCAST_JOIN_GROUP
 * where available, otherwise the per-family IP_ADD_MEMBERSHIP /
 * IPV6_JOIN_GROUP options.  Returns setsockopt()'s result (0/-1).
 * NOTE(review): the "include mcast_joinN" markers indicate this was
 * adapted from W. R. Stevens' UNIX Network Programming sample code --
 * the statement ordering (including the goto into the else-branch) is
 * deliberate; do not restructure casually. */
static int
mcast_join(evutil_socket_t sockfd, const struct sockaddr *grp, socklen_t grplen,
const char *ifname, u_int ifindex)
{
#if (defined MCAST_JOIN_GROUP) && (! defined __APPLE__)
struct group_req req;
if (ifindex > 0) {
req.gr_interface = ifindex;
} else if (ifname != NULL) {
if ( (req.gr_interface = if_nametoindex(ifname)) == 0) {
errno = ENXIO; /* i/f name not found */
return(-1);
}
} else
req.gr_interface = 0;
if (grplen > sizeof(req.gr_group)) {
errno = EINVAL;
return -1;
}
memcpy(&req.gr_group, grp, grplen);
return (setsockopt(sockfd, family_to_level(grp->sa_family),
MCAST_JOIN_GROUP, &req, sizeof(req)));
#else
/* end mcast_join1 */
/* include mcast_join2 */
switch (grp->sa_family) {
case AF_INET: {
struct ip_mreq mreq;
struct ifreq ifreq;
memcpy(&mreq.imr_multiaddr.s_addr,
&((const struct sockaddr_in *) grp)->sin_addr,
sizeof(struct in_addr));
if (ifindex > 0) {
if (if_indextoname(ifindex, ifreq.ifr_name) == NULL) {
errno = ENXIO; /* i/f index not found */
return(-1);
}
goto doioctl;
} else if (ifname != NULL) {
strncpy(ifreq.ifr_name, ifname, IFNAMSIZ);
doioctl:
/* Resolve the interface's unicast address for imr_interface. */
if (ioctl(sockfd, SIOCGIFADDR, &ifreq) < 0)
return(-1);
memcpy(&mreq.imr_interface,
&((struct sockaddr_in *) &ifreq.ifr_addr)->sin_addr,
sizeof(struct in_addr));
} else
mreq.imr_interface.s_addr = htonl(INADDR_ANY);
return(setsockopt(sockfd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
&mreq, sizeof(mreq)));
}
/* end mcast_join2 */
/* include mcast_join3 */
#ifdef IPV6
#ifndef IPV6_JOIN_GROUP /* APIv0 compatibility */
#define IPV6_JOIN_GROUP IPV6_ADD_MEMBERSHIP
#endif
case AF_INET6: {
struct ipv6_mreq mreq6;
memcpy(&mreq6.ipv6mr_multiaddr,
&((const struct sockaddr_in6 *) grp)->sin6_addr,
sizeof(struct in6_addr));
if (ifindex > 0) {
mreq6.ipv6mr_interface = ifindex;
} else if (ifname != NULL) {
if ( (mreq6.ipv6mr_interface = if_nametoindex(ifname)) == 0) {
errno = ENXIO; /* i/f name not found */
return(-1);
}
} else
mreq6.ipv6mr_interface = 0;
return(setsockopt(sockfd, IPPROTO_IPV6, IPV6_JOIN_GROUP,
&mreq6, sizeof(mreq6)));
}
#endif
default:
errno = EAFNOSUPPORT;
return(-1);
}
#endif
return -1;
}
evutil_socket_t
create_multicast_sock (struct sockaddr *sasend, socklen_t salen)
{
    /* POSIX variant: create a UDP socket bound to the multicast group
     * address in `sasend` and join that group.  Returns the socket or
     * -1 on error. */
    int ret;
    const int on = 1;
    evutil_socket_t recvfd;
    struct sockaddr *sarecv;

    if ( (recvfd = socket (sasend->sa_family, SOCK_DGRAM, 0)) < 0) {
        ccnet_warning ("Create multicast listen socket fails: %s\n",
                       strerror(errno));
        return -1;
    }

    ret = setsockopt(recvfd, SOL_SOCKET, SO_REUSEADDR, (char *)&on, sizeof(on));
    if (ret < 0)
        ccnet_warning("Failed to setsockopt SO_REUSEADDR\n");

    /* Bind a private copy of the group address. */
    sarecv = malloc(salen);
    if (sarecv == NULL) {
        /* BUGFIX: malloc was unchecked. */
        close (recvfd);
        return -1;
    }
    memcpy(sarecv, sasend, salen);

    if (bind(recvfd, sarecv, salen) < 0) {
        ccnet_warning ("Bind multicast listen socket fails: %s\n",
                       strerror(errno));
        free (sarecv);
        /* BUGFIX: the fd was leaked on this and the next error path. */
        close (recvfd);
        return -1;
    }
    free (sarecv);

    if (mcast_join(recvfd, sasend, salen, NULL, 0) < 0) {
        ccnet_warning ("mcast_join error: %s\n", strerror(errno));
        close (recvfd);
        return -1;
    }

    return recvfd;
}
#endif
int
sockfd_to_family(evutil_socket_t sockfd)
{
    /* Return the address family of the local end of sockfd, or -1 if
     * getsockname() fails. */
    struct sockaddr_storage local;
    socklen_t local_len = sizeof(local);

    if (getsockname(sockfd, (struct sockaddr *) &local, &local_len) < 0)
        return -1;

    return local.ss_family;
}
/* Enable (onoff=1) or disable (onoff=0) loopback of outgoing multicast
 * datagrams on sockfd, dispatching on the socket's own address family.
 * Returns setsockopt()'s result, or -1 with errno=EAFNOSUPPORT for
 * unknown families.  Always returns -1 on Windows in this build. */
int
mcast_set_loop(evutil_socket_t sockfd, int onoff)
{
#ifndef WIN32
switch (sockfd_to_family(sockfd)) {
case AF_INET: {
/* IPv4 uses a u_char flag ... */
u_char flag;
flag = onoff;
return(setsockopt(sockfd, IPPROTO_IP, IP_MULTICAST_LOOP,
&flag, sizeof(flag)));
}
#ifdef IPV6
case AF_INET6: {
/* ... while IPv6 expects a u_int. */
u_int flag;
flag = onoff;
return(setsockopt(sockfd, IPPROTO_IPV6, IPV6_MULTICAST_LOOP,
&flag, sizeof(flag)));
}
#endif
default:
errno = EAFNOSUPPORT;
return(-1);
}
#else
return -1;
#endif /* WIN32 */
}
================================================
FILE: lib/net.h
================================================
#ifndef CCNET_NET_H
#define CCNET_NET_H

/* Portable socket helpers shared by ccnet/seafile: TCP open/bind/accept,
 * address formatting/parsing, UDP and multicast setup.
 * NOTE(review): the targets of the #include lines below were lost in
 * extraction; restore them from the original header before building. */

#ifdef WIN32
#include
#include
#include
typedef int socklen_t;
#define UNUSED
#else
#include
#include
#include
#include
#include
#include
#include
#include
#endif
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
#include
#else
#include
#endif
#ifdef WIN32
/* #define ECONNREFUSED WSAECONNREFUSED */
/* #define ECONNRESET WSAECONNRESET */
/* #define EHOSTUNREACH WSAEHOSTUNREACH */
/* #define EINPROGRESS WSAEINPROGRESS */
/* #define ENOTCONN WSAENOTCONN */
/* #define EWOULDBLOCK WSAEWOULDBLOCK */
/* Uniform way to read the last socket error on both platforms. */
#define sockerrno WSAGetLastError( )
#else
#include
#define sockerrno errno
#endif
#ifdef WIN32
extern int inet_aton(const char *string, struct in_addr *addr);
extern const char *inet_ntop(int af, const void *src, char *dst, size_t size);
extern int inet_pton(int af, const char *src, void *dst);
#endif
/* Connect a TCP socket to sa; returns the socket or -1. */
evutil_socket_t ccnet_net_open_tcp (const struct sockaddr *sa, int nonblock);
/* Create a TCP socket bound to port (not yet listening); -1 on error. */
evutil_socket_t ccnet_net_bind_tcp (int port, int nonblock);
evutil_socket_t ccnet_net_accept (evutil_socket_t b,
struct sockaddr_storage *cliaddr,
socklen_t *len, int nonblock);
int ccnet_net_make_socket_blocking (evutil_socket_t fd);
/* bind to an IPv4 address, if (*port == 0) the port number will be returned */
evutil_socket_t ccnet_net_bind_v4 (const char *ipaddr, int *port);
int ccnet_netSetTOS ( evutil_socket_t s, int tos );
/* Format sa as text; returns a static buffer (not thread-safe). */
char *sock_ntop(const struct sockaddr *sa, socklen_t salen);
/* Port of sa, in host byte order; 0 for unknown families. */
uint16_t sock_port (const struct sockaddr *sa);
/* return 1 if addr_str is a valid ipv4 or ipv6 address */
int is_valid_ipaddr (const char *addr_str);
/* return 0 if success, -1 if error */
int sock_pton (const char *addr_str, uint16_t port,
struct sockaddr_storage *sa);
/* UDP socket plus malloc'ed destination address (caller frees *saptr). */
evutil_socket_t udp_client (const char *host, const char *serv,
struct sockaddr **saptr, socklen_t *lenp);
int mcast_set_loop(evutil_socket_t sockfd, int onoff);
evutil_socket_t create_multicast_sock (struct sockaddr *sasend, socklen_t salen);
#endif
================================================
FILE: lib/repo.vala
================================================
namespace Seafile {
// GObject types exchanged over the seafile RPC layer for repositories
// and related objects.  Properties are intentionally flat so searpc can
// marshal them generically.
public class Repo : Object {
// Section 1: Basic information
// Members in this section should be set for every Repo object
// _id is for fast access from c code. id is for
// vala to automatically generate a property. Note,
// if a Vala property is start with _, it is not
// translated into a GObject property.
// Due to performance reasons, 'desc', 'magic', 'enc_version', 'root', 'repaired', 'random_key'
// are no longer returned in listing repos API.
public char _id[37];
// 36-char repo UUID, kept NUL-terminated in _id.
public string id {
get { return (string)_id; }
set { Posix.memcpy(_id, value, 36); _id[36] = '\0'; }
}
public string _name;
public string name {
get { return _name; }
set { _name = value; }
}
public string _desc; // description
public string desc {
get { return _desc; }
set { _desc = value; }
}
// data format version
public int version { get; set; }
public int64 last_modify { get; set; }
public int64 size { get; set; }
public int64 file_count { get; set; }
public string last_modifier { get; set; }
public string head_cmmt_id { get; set; }
public string root { get; set; }
public int status { get; set; }
public string repo_type { get; set; }
// To be compatible with obsoleted SharedRepo object
public string repo_id { get; set; }
public string repo_name { get; set; }
public string repo_desc { get; set; }
public int64 last_modified { get; set; }
// Section 2: Encryption related
// Members in this section should be set for every Repo object
public bool encrypted { get; set; }
public string magic { get; set; }
public int enc_version { get; set; }
public string random_key { get; set; }
public string salt { get; set; }
public string pwd_hash { get; set; }
public string pwd_hash_algo { get; set; }
public string pwd_hash_params { get; set; }
// Section 3: Client only information
// Should be set for all client repo objects
public string _worktree;
public string worktree {
get { return _worktree; }
set { _worktree = value; }
}
public string _relay_id;
public string relay_id {
get { return _relay_id; }
set { _relay_id = value; }
}
public int last_sync_time { get; set; }
public bool auto_sync { get; set; }
public bool worktree_invalid { get; set; }
// Section 4: Server only information
// Should be set for all server repo objects
// virutal repo related
public bool is_virtual { get; set; }
public string origin_repo_id { get; set; }
public string origin_repo_name { get; set; }
public string origin_path { get; set; }
public bool is_original_owner { get; set; }
public string virtual_perm { get; set; }
// Used to access fs objects
public string store_id { get; set; }
public bool is_corrupted { get; set; }
public bool repaired { get; set; }
// Section 5: Share information
// Only set in list_share_repos, get_group_repos and get_inner_pub_repos, etc
public string share_type { get; set; } // personal, group or public
public string permission { get; set; }
public string user { get; set; } // share from or share to
public int group_id { get; set; } // used when shared to group
public string group_name { get; set; } // used when shared to group
// For list_owned_repo
public bool is_shared { get; set; }
}
// A deleted repo sitting in the server-side trash.
public class TrashRepo : Object {
public string repo_id { get; set; }
public string repo_name { get; set; }
public string head_id { get; set; }
public string owner_id { get; set; }
public int64 size { get; set; }
public int64 del_time { get; set; }
public bool encrypted { get; set; }
}
// Client-side sync state flags for one repo.
public class SyncInfo : Object {
public string repo_id { get; set; }
public string head_commit { get; set; }
public bool deleted_on_relay { get; set; }
public bool bad_local_branch { get; set; }
public bool need_fetch { get; set; }
public bool need_upload { get; set; }
public bool need_merge { get; set; }
// public int last_sync_time { get; set; }
}
// State/progress of one client sync task.
public class SyncTask : Object {
public bool is_sync_lan { get; set; }
public bool force_upload { get; set; }
public string dest_id { get; set; }
public string repo_id { get; set; }
public string state { get; set; }
public string error { get; set; }
public string tx_id { get; set; }
}
// Daemon session info (e.g. data directory path).
public class SessionInfo : Object {
public string datadir { get; set; }
}
// Progress of a worktree checkout.
public class CheckoutTask : Object {
public string repo_id { get; set; }
public string worktree { get; set; }
public int total_files { get; set; }
public int finished_files { get; set; }
}
// One entry of a diff between two commits.
public class DiffEntry : Object {
public string status { get; set; }
public string name { get; set; }
public string new_name { get; set; }
}
// One entry in the deleted-files listing of a repo.
public class DeletedEntry : Object {
public string commit_id { get; set; }
public string obj_id { get; set; }
public string obj_name { get; set; }
public string basedir { get; set; }
public int mode { get; set; }
public int delete_time { get; set; }
public int64 file_size { get; set; }
public string scan_stat { get; set; }
}
// A sync token issued to a client device for one repo.
public class RepoTokenInfo: Object {
public string repo_id { get; set; }
public string repo_name { get; set; }
public string repo_owner { get; set; }
public string email { get; set; }
public string token { get; set; }
public string peer_id { get; set; }
public string peer_ip { get; set; }
public string peer_name { get; set; }
public int64 sync_time { get; set; }
public string client_ver { get; set; }
}
// User a repo is shared with, plus the granted permission.
public class SharedUser : Object {
public string repo_id { get; set; }
public string user { get; set; }
public string perm { get; set; }
}
// Group a repo is shared with, plus the granted permission.
public class SharedGroup : Object {
public string repo_id { get; set; }
public int group_id { get; set; }
public string perm { get; set; }
}
// Encryption parameters needed to decrypt a repo.
public class EncryptionInfo: Object {
public string repo_id { get; set; }
public string passwd { get; set; }
public int enc_version { get; set; }
public string magic { get; set; }
public string random_key { get; set; }
public string salt { get; set; }
public string pwd_hash { get; set; }
public string pwd_hash_algo { get; set; }
public string pwd_hash_params { get; set; }
}
// Per-user storage usage record.
public class UserQuotaUsage: Object {
public string user { get; set; }
public int64 usage { get; set; }
}
} // namespace
================================================
FILE: lib/rpc_table.py
================================================
"""
Define RPC functions needed to generate
"""
# [ , [] ]
func_table = [
[ "int", [] ],
[ "int", ["int"] ],
[ "int", ["int", "int"] ],
[ "int", ["int", "string"] ],
[ "int", ["int", "string", "int"] ],
[ "int", ["int", "string", "string"] ],
[ "int", ["int", "string", "int", "int"] ],
[ "int", ["int", "int", "string", "string"] ],
[ "int", ["int", "string", "string", "int"] ],
[ "int", ["string"] ],
[ "int", ["string", "int"] ],
[ "int", ["string", "int", "int"] ],
[ "int", ["string", "int", "string"] ],
[ "int", ["string", "int", "string", "string"] ],
[ "int", ["string", "int", "int", "string", "string"] ],
[ "int", ["string", "string"] ],
[ "int", ["string", "string", "int"] ],
[ "int", ["string", "string", "int64"] ],
[ "int", ["string", "string", "string"] ],
[ "int", ["string", "string", "int", "int"] ],
[ "int", ["string", "string", "string", "int"] ],
[ "int", ["string", "string", "string", "int", "string"] ],
[ "int", ["string", "string", "string", "string"] ],
[ "int", ["string", "string", "string", "string", "string"] ],
[ "int", ["string", "int", "string", "int", "int"] ],
[ "int", ["string", "string", "string", "string", "string", "string"] ],
[ "int", ["string", "string", "string", "int", "string", "string"] ],
[ "int", ["string", "string", "string", "string", "string", "string", "string"] ],
[ "int", ["string", "int64"]],
[ "int", ["int", "int64"]],
[ "int", ["int", "string", "int64"]],
[ "int64", [] ],
[ "int64", ["string"] ],
[ "int64", ["int"]],
[ "int64", ["int", "string"]],
[ "int64", ["string", "string"]],
[ "int64", ["string", "int", "string"] ],
[ "string", [] ],
[ "string", ["int"] ],
[ "string", ["int", "int"] ],
[ "string", ["int", "string"] ],
[ "string", ["int", "int", "string"] ],
[ "string", ["string"] ],
[ "string", ["string", "int"] ],
[ "string", ["string", "int", "int"] ],
[ "string", ["string", "string"] ],
[ "string", ["string", "string", "int"] ],
[ "string", ["string", "string", "int", "int"] ],
[ "string", ["string", "string", "string"] ],
[ "string", ["string", "string", "string", "int"] ],
[ "string", ["string", "string", "string", "string"] ],
[ "string", ["string", "string", "string", "string", "int"] ],
[ "string", ["string", "string", "string", "string", "int", "string", "string"] ],
[ "string", ["string", "string", "string", "string", "string"] ],
[ "string", ["string", "string", "string", "string", "string", "int"] ],
[ "string", ["string", "string", "string", "int", "string", "string"] ],
[ "string", ["string", "string", "string", "string", "string", "string", "int"] ],
[ "string", ["string", "string", "string", "string", "string", "string", "int", "int"] ],
[ "string", ["string", "string", "string", "string", "string", "string"] ],
[ "string", ["string", "string", "string", "string", "string", "string", "int64"] ],
[ "string", ["string", "string", "string", "string", "string", "string", "int64", "int"] ],
[ "string", ["string", "string", "string", "string", "string", "string", "string"] ],
[ "string", ["string", "string", "string", "string", "string", "string", "string", "int"] ],
[ "string", ["string", "string", "string", "string", "string", "string", "string", "int64"] ],
[ "string", ["string", "string", "string", "string", "string", "string", "string", "string", "string"] ],
[ "string", ["string", "string", "string", "string", "string", "string", "string", "int", "string", "string", "string"] ],
[ "string", ["string", "int", "string", "string", "string", "string", "string", "string", "string", "string", "string", "string", "int", "string"] ],
[ "string", ["string", "int", "string", "int", "int"] ],
[ "string", ["string", "int", "string", "string", "string"] ],
[ "objlist", [] ],
[ "objlist", ["int"] ],
[ "objlist", ["int", "int"] ],
[ "objlist", ["int", "string"] ],
[ "objlist", ["int", "int", "int"] ],
[ "objlist", ["int", "int", "string"] ],
[ "objlist", ["int", "int", "string", "int"] ],
[ "objlist", ["string"] ],
[ "objlist", ["string", "int"] ],
[ "objlist", ["string", "int", "int"] ],
[ "objlist", ["string", "int", "int", "int"] ],
[ "objlist", ["string", "int", "string"] ],
[ "objlist", ["string", "string"] ],
[ "objlist", ["string", "string", "string"] ],
[ "objlist", ["string", "string", "int"] ],
[ "objlist", ["string", "string", "string", "int"] ],
[ "objlist", ["string", "string", "int", "int"] ],
[ "objlist", ["string", "int", "int", "string"] ],
[ "objlist", ["string", "string", "int", "int", "int"] ],
[ "objlist", ["string", "string", "string", "int", "int", "int"] ],
[ "objlist", ["int", "string", "string", "int", "int"] ],
[ "objlist", ["string", "int", "string", "string", "string"] ],
[ "objlist", ["string", "int", "string", "int", "int"] ],
[ "objlist", ["string", "int", "string", "string", "int"] ],
[ "objlist", ["string", "string", "string", "string", "int", "int"] ],
[ "object", [] ],
[ "object", ["int"] ],
[ "object", ["string"] ],
[ "object", ["string", "string"] ],
[ "object", ["string", "string", "string"] ],
[ "object", ["string", "int", "string"] ],
[ "object", ["int", "string", "string"] ],
[ "object", ["int", "string", "string", "string", "string"] ],
[ "object", ["string", "string", "int", "int"] ],
[ "object", ["string", "string", "string", "int"] ],
[ "object", ["string", "string", "string", "string", "string", "string", "string", "int", "int"] ],
[ "object", ["string", "string", "string", "string", "string", "string", "int", "string", "int", "int"] ],
["json", ["string"]],
]
================================================
FILE: lib/seahub.vala
================================================
namespace Seafile {
    // Information about a share link, exposed to seahub.
    public class ShareLinkInfo : Object {
        public string repo_id { set; get; }
        public string file_path { set; get; }
        public string parent_dir { set; get; }
        public string share_type { set; get; }
    }
}
================================================
FILE: lib/search-result.vala
================================================
// compile this file with `valac --pkg posix repo.vala -C -H repo.h`
namespace Seafile {
    // A single file-search hit.
    public class SearchResult: Object {
        // Backing field for `path`; public -- presumably accessed from C. TODO confirm.
        public string _path;
        public string path {
            get { return _path; }
            set { _path = value; }
        }
        public int64 size { get; set; }
        public int64 mtime { get; set; }
        public bool is_dir { set; get; }
    }
} // namespace
================================================
FILE: lib/task.vala
================================================
namespace Seafile {
// A running transfer (sync) task and its progress counters.
public class Task : Object {
    // Fixed buffer backing tx_id: a 36-character id plus the NUL terminator.
    public char _tx_id[37];
    public string tx_id {
        get { return (string)_tx_id; }
        // Copies exactly 36 chars and NUL-terminates; values shorter than
        // 36 chars would over-read -- presumably callers always pass UUIDs.
        set { Posix.memcpy(_tx_id, value, 36); _tx_id[36] = '\0'; }
    }
    public string ttype { get; set; }
    public string repo_id { get; set; }
    public string dest_id { get; set; }
    public string from_branch { get; set; }
    public string to_branch { get; set; }
    public string state { get; set; }
    public string rt_state { get; set; }
    public string error_str { get; set; }
    public int block_total { get; set; }
    public int block_done { get; set; } // number of blocks sent or received
    public int fs_objects_total { get; set; }
    public int fs_objects_done { get; set; }
    public int rate { get; set; }
    // Remaining size; exposed through the `rsize` property below.
    public int64 _rsize;
    public int64 rsize{
        get { return _rsize; }
        set { _rsize = value; }
    }
    // Completed size; exposed through the `dsize` property below.
    public int64 _dsize;
    public int64 dsize {
        get { return _dsize; }
        set { _dsize = value; }
    }
}

// State of a clone (initial download) task.
public class CloneTask : Object {
    public string state { get; set; }
    public string error_str { get; set; }
    public string repo_id { get; set; }
    public string peer_id { get; set; }
    public string repo_name { get; set; }
    public string worktree { get; set; }
    public string tx_id { get; set; }
}
} // namespace
================================================
FILE: lib/timer.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
#include
#include
#include
#else
#include
#endif
#include
#include "utils.h"
#include "timer.h"
struct CcnetTimer
{
    struct event event;    /* libevent timer event */
    struct timeval tv;     /* firing interval */
    TimerCB func;          /* user callback; nonzero return reschedules */
    void *user_data;
    uint8_t inCallback;    /* set while func runs; guards against self-free */
};

/* libevent callback: run the user function, then either re-arm the timer
 * (nonzero return) or destroy it (zero return). */
static void
timer_callback (evutil_socket_t fd, short event, void *vtimer)
{
    int more;
    struct CcnetTimer *timer = vtimer;

    timer->inCallback = 1;
    more = (*timer->func) (timer->user_data);
    timer->inCallback = 0;

    if (more)
        evtimer_add (&timer->event, &timer->tv);
    else
        ccnet_timer_free (&timer);
}

/* Cancel and free a timer; *ptimer is set to NULL in all cases.
 * If called while the callback is running, only the pointer is cleared:
 * the event is neither deleted nor freed here -- the memory is reclaimed
 * by timer_callback when the callback returns 0.
 * NOTE(review): a callback that frees its own timer and then returns
 * nonzero would re-arm a timer nobody owns -- verify callers avoid this. */
void
ccnet_timer_free (CcnetTimer **ptimer)
{
    CcnetTimer *timer;

    /* zero out the argument passed in */
    g_return_if_fail (ptimer);
    timer = *ptimer;
    *ptimer = NULL;

    /* destroy the timer directly or via the command queue */
    if (timer && !timer->inCallback)
    {
        event_del (&timer->event);
        g_free (timer);
    }
}
/* Allocate and arm a timer that calls func(user_data) after the given
 * interval; see timer.h for the reschedule/free contract. */
CcnetTimer*
ccnet_timer_new (TimerCB func,
                 void *user_data,
                 uint64_t interval_milliseconds)
{
    CcnetTimer *t = g_new0 (CcnetTimer, 1);

    t->func = func;
    t->user_data = user_data;
    t->tv = timeval_from_msec (interval_milliseconds);

    evtimer_set (&t->event, timer_callback, t);
    evtimer_add (&t->event, &t->tv);

    return t;
}
================================================
FILE: lib/timer.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef CCNET_TIMER_H
#define CCNET_TIMER_H
/* Return TRUE to reschedule the timer, FALSE to cancel it. */
typedef int (*TimerCB) (void *data);
struct CcnetTimer;
typedef struct CcnetTimer CcnetTimer;
/**
* Calls timer_func(user_data) after the specified interval.
* The timer is freed if timer_func returns zero.
* Otherwise, it's called again after the same interval.
*/
CcnetTimer* ccnet_timer_new (TimerCB func,
void *user_data,
uint64_t timeout_milliseconds);
/**
* Frees a timer and sets the timer pointer to NULL.
*/
void ccnet_timer_free (CcnetTimer **timer);
#endif
================================================
FILE: lib/utils.c
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#include
#include "common.h"
#ifdef WIN32
#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x500
#endif
#endif
#include "utils.h"
#ifdef WIN32
#include
#include
#include
#include
#include
#include
#else
#include
#endif
#ifndef WIN32
#include
#include
#endif
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
extern int inet_pton(int af, const char *src, void *dst);
/* Convert a millisecond count into an equivalent struct timeval. */
struct timeval
timeval_from_msec (uint64_t milliseconds)
{
    struct timeval tv;
    const uint64_t usec = milliseconds * 1000;

    tv.tv_sec  = usec / 1000000;
    tv.tv_usec = usec % 1000000;
    return tv;
}
/* Write the lowercase hex representation of n_bytes of rawdata into
 * hex_str and NUL-terminate it. hex_str must hold 2*n_bytes+1 chars. */
void
rawdata_to_hex (const unsigned char *rawdata, char *hex_str, int n_bytes)
{
    static const char digits[] = "0123456789abcdef";
    int i;

    for (i = 0; i < n_bytes; i++) {
        unsigned int b = rawdata[i];
        hex_str[2*i]     = digits[(b >> 4) & 0xf];
        hex_str[2*i + 1] = digits[b & 0xf];
    }
    hex_str[2*n_bytes] = '\0';
}
/* Value of one hex digit (either case), or ~0u for a non-hex character. */
static unsigned hexval(char c)
{
    if ('0' <= c && c <= '9')
        return (unsigned)(c - '0');
    if ('a' <= c && c <= 'f')
        return 10u + (unsigned)(c - 'a');
    if ('A' <= c && c <= 'F')
        return 10u + (unsigned)(c - 'A');
    return ~0u;
}

/* Decode 2*n_bytes hex characters from hex_str into rawdata.
 * Returns 0 on success, -1 on the first invalid digit (bytes decoded
 * before the failure have already been written). */
int
hex_to_rawdata (const char *hex_str, unsigned char *rawdata, int n_bytes)
{
    int i;

    for (i = 0; i < n_bytes; i++) {
        unsigned hi = hexval (hex_str[2*i]);
        unsigned lo = hexval (hex_str[2*i + 1]);

        if (hi > 0xf || lo > 0xf)
            return -1;
        rawdata[i] = (unsigned char)((hi << 4) | lo);
    }
    return 0;
}
/* BSD-style strlcpy: copy src into dest (at most size-1 chars), always
 * NUL-terminating when size > 0. Returns strlen(src), so a return value
 * >= size indicates truncation. */
size_t
ccnet_strlcpy (char *dest, const char *src, size_t size)
{
    size_t src_len = strlen (src);

    if (size != 0) {
        size_t n = src_len;
        if (n > size - 1)
            n = size - 1;
        memcpy (dest, src, n);
        dest[n] = '\0';
    }
    return src_len;
}
/*
 * checkdir:
 * Return 0 if `dir` exists and is a directory, -1 otherwise.
 * On Windows, trailing path separators are stripped first because
 * stat() fails on directory names ending in '\' or '/'.
 */
int
checkdir (const char *dir)
{
    SeafStat st;

#ifdef WIN32
    int ret = 0;
    /* remove trailing '\\' */
    char *path = g_strdup(dir);
    size_t len = strlen(path);
    char *p = path + len;

    /* BUGFIX: bound the loop so "" or an all-separator path cannot walk
     * the pointer past the start of the buffer (was undefined behavior). */
    while (p > path && (p[-1] == '\\' || p[-1] == '/')) *--p = '\0';

    /* BUGFIX: stat the stripped copy, not the original `dir` -- the
     * stripping above had no effect when `dir` was passed to stat. */
    if (path[0] == '\0' || (seaf_stat(path, &st) < 0) || !S_ISDIR(st.st_mode))
        ret = -1;

    g_free (path);
    return ret;
#else
    if ((seaf_stat(dir, &st) < 0) || !S_ISDIR(st.st_mode))
        return -1;
    return 0;
#endif
}
/*
 * Create `dir` (and any missing parents) if it does not exist.
 * Returns 0 on success, -1 on failure (per g_mkdir_with_parents).
 */
int
checkdir_with_mkdir (const char *dir)
{
#ifdef WIN32
    int ret;
    /* remove trailing '\\' */
    char *path = g_strdup(dir);
    size_t len = strlen(path);
    char *p = path + len;

    /* BUGFIX: bound the loop so "" or an all-separator path cannot walk
     * the pointer past the start of the buffer (was undefined behavior). */
    while (p > path && (p[-1] == '\\' || p[-1] == '/')) *--p = '\0';

    ret = g_mkdir_with_parents(path, 0755);
    g_free (path);
    return ret;
#else
    return g_mkdir_with_parents(dir, 0755);
#endif
}
/*
 * Create the object-store layout under `base`: one subdirectory per
 * two-hex-digit prefix ("base/00" .. "base/ff", 256 in total).
 * Returns 0 on success, negative on failure.
 */
int
objstore_mkdir (const char *base)
{
    int ret;
    int i, j, len;
    static const char hex[] = "0123456789abcdef";
    char subdir[SEAF_PATH_MAX];

    if ( (ret = checkdir_with_mkdir(base)) < 0)
        return ret;

    len = strlen(base);
    /* BUGFIX: subdir holds base + separator + 2 hex chars + NUL; reject a
     * base that would overflow it (previously smashed the stack). */
    if (len + 4 > SEAF_PATH_MAX)
        return -1;

    memcpy(subdir, base, len);
    subdir[len] = G_DIR_SEPARATOR;
    subdir[len+3] = '\0';

    for (i = 0; i < 16; i++) {
        subdir[len+1] = hex[i];
        for (j = 0; j < 16; j++) {
            subdir[len+2] = hex[j];
            if ( (ret = checkdir_with_mkdir(subdir)) < 0)
                return ret;
        }
    }

    return 0;
}
/* Build "base/<first 2 hex chars>/<remaining 38 chars>" into `path`.
 * The caller must supply a buffer large enough for the result; no
 * bounds checking is performed here. */
void
objstore_get_path (char *path, const char *base, const char *obj_id)
{
    int pos = strlen (base);

    memcpy (path, base, pos);
    path[pos++] = G_DIR_SEPARATOR;
    path[pos++] = obj_id[0];
    path[pos++] = obj_id[1];
    path[pos++] = G_DIR_SEPARATOR;
    strcpy (path + pos, obj_id + 2);
}
#ifdef WIN32
/* UNIX epoch expressed in Windows time, the unit is 100 nanoseconds.
 * See http://msdn.microsoft.com/en-us/library/ms724228
 */
#define UNIX_EPOCH 116444736000000000ULL

/* Convert a FILETIME (100ns ticks since 1601-01-01) to seconds since
 * the Unix epoch. */
__time64_t
file_time_to_unix_time (FILETIME *ftime)
{
    guint64 win_time, unix_time;

    win_time = (guint64)ftime->dwLowDateTime + (((guint64)ftime->dwHighDateTime)<<32);
    unix_time = (win_time - UNIX_EPOCH)/10000000;
    return (__time64_t)unix_time;
}

/* Read the creation and last-write times of an open fd (as Unix seconds,
 * UTC). Returns 0 on success, -1 on failure (a warning is logged). */
static int
get_utc_file_time_fd (int fd, __time64_t *mtime, __time64_t *ctime)
{
    HANDLE handle;
    FILETIME write_time, create_time;

    handle = (HANDLE)_get_osfhandle (fd);
    if (handle == INVALID_HANDLE_VALUE) {
        g_warning ("Failed to get handle from fd: %lu.\n", GetLastError());
        return -1;
    }

    if (!GetFileTime (handle, &create_time, NULL, &write_time)) {
        g_warning ("Failed to get file time: %lu.\n", GetLastError());
        return -1;
    }

    *mtime = file_time_to_unix_time (&write_time);
    *ctime = file_time_to_unix_time (&create_time);

    return 0;
}

/* Seconds between the Windows epoch (1601) and the Unix epoch (1970). */
#define EPOCH_DIFF 11644473600ULL

/* Inverse of file_time_to_unix_time: Unix seconds -> FILETIME. */
inline static void
unix_time_to_file_time (guint64 unix_time, FILETIME *ftime)
{
    guint64 win_time;

    win_time = (unix_time + EPOCH_DIFF) * 10000000;
    ftime->dwLowDateTime = win_time & 0xFFFFFFFF;
    ftime->dwHighDateTime = (win_time >> 32) & 0xFFFFFFFF;
}

/* Set the last-write time of `wpath` to `mtime` (Unix seconds, UTC).
 * `path` is only used for log messages. Returns 0 on success, -1 on
 * failure. FILE_FLAG_BACKUP_SEMANTICS allows opening directories too. */
static int
set_utc_file_time (const char *path, const wchar_t *wpath, guint64 mtime)
{
    HANDLE handle;
    FILETIME write_time;

    handle = CreateFileW (wpath,
                          GENERIC_WRITE,
                          FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                          NULL,
                          OPEN_EXISTING,
                          FILE_FLAG_BACKUP_SEMANTICS,
                          NULL);
    if (handle == INVALID_HANDLE_VALUE) {
        g_warning ("Failed to open %s: %lu.\n", path, GetLastError());
        return -1;
    }

    unix_time_to_file_time (mtime, &write_time);

    if (!SetFileTime (handle, NULL, NULL, &write_time)) {
        g_warning ("Failed to set file time for %s: %lu.\n", path, GetLastError());
        CloseHandle (handle);
        return -1;
    }

    CloseHandle (handle);
    return 0;
}
/* Convert a UTF-8 path to a UTF-16 "\\?\"-prefixed long path (UNC paths
 * starting with "//" become "\\?\UNC\..."), with '/' normalized to '\'.
 * The result must be freed with g_free(). */
wchar_t *
win32_long_path (const char *path)
{
    char *long_path, *p;
    wchar_t *long_path_w;

    if (strncmp(path, "//", 2) == 0)
        long_path = g_strconcat ("\\\\?\\UNC\\", path + 2, NULL);
    else
        long_path = g_strconcat ("\\\\?\\", path, NULL);

    for (p = long_path; *p != 0; ++p)
        if (*p == '/')
            *p = '\\';

    long_path_w = g_utf8_to_utf16 (long_path, -1, NULL, NULL, NULL);

    g_free (long_path);
    return long_path_w;
}

/* Convert a (possible) 8.3 format path to long path */
/* Expand any 8.3 components of worktree-relative `path` via
 * GetLongPathNameW and return the expanded path relative to `worktree`,
 * or NULL on failure.
 * NOTE(review): the result is allocated with wcsdup(), so it must be
 * released with free(), not g_free() -- verify callers.
 * NOTE(review): the g_utf16_to_utf8()+g_free() pairs on the error paths
 * produce a string only to discard it -- looks like leftover from removed
 * logging. */
wchar_t *
win32_83_path_to_long_path (const char *worktree, const wchar_t *path, int path_len)
{
    wchar_t *worktree_w = g_utf8_to_utf16 (worktree, -1, NULL, NULL, NULL);
    int wt_len;
    wchar_t *p;
    wchar_t *fullpath_w = NULL;
    wchar_t *fullpath_long = NULL;
    wchar_t *ret = NULL;
    char *fullpath;

    for (p = worktree_w; *p != L'\0'; ++p)
        if (*p == L'/')
            *p = L'\\';

    /* Build "\\?\<worktree>\<path>". */
    wt_len = wcslen(worktree_w);
    fullpath_w = g_new0 (wchar_t, wt_len + path_len + 6);
    wcscpy (fullpath_w, L"\\\\?\\");
    wcscat (fullpath_w, worktree_w);
    wcscat (fullpath_w, L"\\");
    wcsncat (fullpath_w, path, path_len);

    fullpath_long = g_new0 (wchar_t, SEAF_PATH_MAX);

    DWORD n = GetLongPathNameW (fullpath_w, fullpath_long, SEAF_PATH_MAX);
    if (n == 0) {
        /* Failed. */
        fullpath = g_utf16_to_utf8 (fullpath_w, -1, NULL, NULL, NULL);
        g_free (fullpath);
        goto out;
    } else if (n > SEAF_PATH_MAX) {
        /* In this case n is the necessary length for the buf. */
        g_free (fullpath_long);
        fullpath_long = g_new0 (wchar_t, n);

        if (GetLongPathNameW (fullpath_w, fullpath_long, n) != (n - 1)) {
            fullpath = g_utf16_to_utf8 (fullpath_w, -1, NULL, NULL, NULL);
            g_free (fullpath);
            goto out;
        }
    }

    /* Remove "\\?\worktree\" from the beginning. */
    ret = wcsdup (fullpath_long + wt_len + 5);

out:
    g_free (worktree_w);
    g_free (fullpath_w);
    g_free (fullpath_long);

    return ret;
}

/* Map a Win32 last-error code to an errno value.
 * NOTE(review): unknown errors map to 0, which looks like "no error" to
 * callers inspecting errno -- verify this is intended. */
static int
windows_error_to_errno (DWORD error)
{
    switch (error) {
    case ERROR_FILE_NOT_FOUND:
    case ERROR_PATH_NOT_FOUND:
        return ENOENT;
    case ERROR_ALREADY_EXISTS:
        return EEXIST;
    case ERROR_ACCESS_DENIED:
    case ERROR_SHARING_VIOLATION:
        return EACCES;
    case ERROR_DIR_NOT_EMPTY:
        return ENOTEMPTY;
    default:
        return 0;
    }
}
#endif
/* stat(2) replacement. On Windows it uses GetFileAttributesExW with a
 * long-path-safe name and synthesizes POSIX-style mode bits (dirs get
 * S_IFDIR|S_IRWXU, files S_IFREG|S_IRUSR|S_IWUSR); times are UTC Unix
 * seconds. Returns 0 on success, -1 with errno set on failure. */
int
seaf_stat (const char *path, SeafStat *st)
{
#ifdef WIN32
    wchar_t *wpath = win32_long_path (path);
    WIN32_FILE_ATTRIBUTE_DATA attrs;
    int ret = 0;

    if (!GetFileAttributesExW (wpath, GetFileExInfoStandard, &attrs)) {
        ret = -1;
        errno = windows_error_to_errno (GetLastError());
        goto out;
    }

    memset (st, 0, sizeof(SeafStat));

    if (attrs.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)
        st->st_mode = (S_IFDIR | S_IRWXU);
    else
        st->st_mode = (S_IFREG | S_IRUSR | S_IWUSR);

    st->st_atime = file_time_to_unix_time (&attrs.ftLastAccessTime);
    st->st_ctime = file_time_to_unix_time (&attrs.ftCreationTime);
    st->st_mtime = file_time_to_unix_time (&attrs.ftLastWriteTime);
    st->st_size = ((((__int64)attrs.nFileSizeHigh)<<32) + attrs.nFileSizeLow);

out:
    g_free (wpath);
    return ret;
#else
    return stat (path, st);
#endif
}

/* fstat(2) replacement. On Windows, mtime/ctime from _fstat64 are
 * overridden with values computed directly from the file handle (UTC). */
int
seaf_fstat (int fd, SeafStat *st)
{
#ifdef WIN32
    if (_fstat64 (fd, st) < 0)
        return -1;

    if (get_utc_file_time_fd (fd, &st->st_mtime, &st->st_ctime) < 0)
        return -1;

    return 0;
#else
    return fstat (fd, st);
#endif
}

#ifdef WIN32

/* Fill a SeafStat from a WIN32_FIND_DATAW entry, using the same
 * synthesized mode bits and UTC time conversion as seaf_stat(). */
void
seaf_stat_from_find_data (WIN32_FIND_DATAW *fdata, SeafStat *st)
{
    memset (st, 0, sizeof(SeafStat));

    if (fdata->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)
        st->st_mode = (S_IFDIR | S_IRWXU);
    else
        st->st_mode = (S_IFREG | S_IRUSR | S_IWUSR);

    st->st_atime = file_time_to_unix_time (&fdata->ftLastAccessTime);
    st->st_ctime = file_time_to_unix_time (&fdata->ftCreationTime);
    st->st_mtime = file_time_to_unix_time (&fdata->ftLastWriteTime);
    st->st_size = ((((__int64)fdata->nFileSizeHigh)<<32) + fdata->nFileSizeLow);
}

#endif
/*
 * Set the modification time of `path` to `mtime` (Unix seconds, UTC),
 * preserving the existing access time on POSIX.
 * Returns 0 on success, -1 on failure.
 */
int
seaf_set_file_time (const char *path, guint64 mtime)
{
#ifndef WIN32
    struct stat st;
    struct utimbuf times;

    if (stat (path, &st) < 0) {
        g_warning ("Failed to stat %s: %s.\n", path, strerror(errno));
        return -1;
    }

    times.actime = st.st_atime;
    times.modtime = (time_t)mtime;

    /* BUGFIX: the argument had been mangled to the character U+00D7 ("×")
     * by an HTML-entity corruption of "&times"; restore the address-of. */
    return utime (path, &times);
#else
    wchar_t *wpath = win32_long_path (path);
    int ret = 0;

    if (set_utc_file_time (path, wpath, mtime) < 0)
        ret = -1;

    g_free (wpath);
    return ret;
#endif
}
/* unlink(2) replacement; long-path safe on Windows. */
int
seaf_util_unlink (const char *path)
{
#ifdef WIN32
    wchar_t *wpath = win32_long_path (path);
    BOOL ok = DeleteFileW (wpath);
    DWORD err = ok ? 0 : GetLastError ();

    g_free (wpath);
    if (!ok) {
        errno = windows_error_to_errno (err);
        return -1;
    }
    return 0;
#else
    return unlink (path);
#endif
}

/* rmdir(2) replacement; long-path safe on Windows. */
int
seaf_util_rmdir (const char *path)
{
#ifdef WIN32
    wchar_t *wpath = win32_long_path (path);
    BOOL ok = RemoveDirectoryW (wpath);
    DWORD err = ok ? 0 : GetLastError ();

    g_free (wpath);
    if (!ok) {
        errno = windows_error_to_errno (err);
        return -1;
    }
    return 0;
#else
    return rmdir (path);
#endif
}

/* mkdir(2) replacement; `mode` is ignored on Windows. */
int
seaf_util_mkdir (const char *path, mode_t mode)
{
#ifdef WIN32
    wchar_t *wpath = win32_long_path (path);
    BOOL ok = CreateDirectoryW (wpath, NULL);
    DWORD err = ok ? 0 : GetLastError ();

    g_free (wpath);
    if (!ok) {
        errno = windows_error_to_errno (err);
        return -1;
    }
    return 0;
#else
    return mkdir (path, mode);
#endif
}
/* open(2) replacement for existing files; long-path safe on Windows.
 * Opens read-only, adding write access when O_WRONLY/O_RDWR is in flags.
 * Returns a CRT fd, or -1 with errno set. */
int
seaf_util_open (const char *path, int flags)
{
#ifdef WIN32
    wchar_t *wpath;
    DWORD access = 0;
    HANDLE handle;
    int fd;

    access |= GENERIC_READ;
    if (flags & (O_WRONLY | O_RDWR))
        access |= GENERIC_WRITE;

    wpath = win32_long_path (path);
    handle = CreateFileW (wpath,
                          access,
                          FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL,
                          OPEN_EXISTING,
                          0,
                          NULL);
    if (handle == INVALID_HANDLE_VALUE) {
        errno = windows_error_to_errno (GetLastError());
        g_free (wpath);
        return -1;
    }

    /* _open_osfhandle transfers ownership: closing the fd closes the handle */
    fd = _open_osfhandle ((intptr_t)handle, 0);

    g_free (wpath);
    return fd;
#else
    return open (path, flags);
#endif
}

/* Creating variant of seaf_util_open. On Windows it uses CREATE_ALWAYS,
 * which creates or truncates; `mode` is only honored on POSIX.
 * Returns a CRT fd, or -1 with errno set. */
int
seaf_util_create (const char *path, int flags, mode_t mode)
{
#ifdef WIN32
    wchar_t *wpath;
    DWORD access = 0;
    HANDLE handle;
    int fd;

    access |= GENERIC_READ;
    if (flags & (O_WRONLY | O_RDWR))
        access |= GENERIC_WRITE;

    wpath = win32_long_path (path);
    handle = CreateFileW (wpath,
                          access,
                          FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL,
                          CREATE_ALWAYS,
                          0,
                          NULL);
    if (handle == INVALID_HANDLE_VALUE) {
        errno = windows_error_to_errno (GetLastError());
        g_free (wpath);
        return -1;
    }

    /* _open_osfhandle transfers ownership: closing the fd closes the handle */
    fd = _open_osfhandle ((intptr_t)handle, 0);

    g_free (wpath);
    return fd;
#else
    return open (path, flags, mode);
#endif
}
/* rename(2) replacement; on Windows uses MoveFileExW with
 * MOVEFILE_REPLACE_EXISTING to match POSIX overwrite semantics. */
int
seaf_util_rename (const char *oldpath, const char *newpath)
{
#ifdef WIN32
    wchar_t *oldpathw = win32_long_path (oldpath);
    wchar_t *newpathw = win32_long_path (newpath);
    BOOL ok = MoveFileExW (oldpathw, newpathw, MOVEFILE_REPLACE_EXISTING);
    DWORD err = ok ? 0 : GetLastError ();

    g_free (oldpathw);
    g_free (newpathw);
    if (!ok) {
        errno = windows_error_to_errno (err);
        return -1;
    }
    return 0;
#else
    return rename (oldpath, newpath);
#endif
}

/* TRUE if `path` exists (any file type). */
gboolean
seaf_util_exists (const char *path)
{
#ifdef WIN32
    wchar_t *wpath = win32_long_path (path);
    gboolean found = (GetFileAttributesW (wpath) != INVALID_FILE_ATTRIBUTES);

    g_free (wpath);
    return found;
#else
    return (access (path, F_OK) == 0);
#endif
}

/* 64-bit lseek wrapper. */
gint64
seaf_util_lseek (int fd, gint64 offset, int whence)
{
#ifdef WIN32
    return _lseeki64 (fd, offset, whence);
#else
    return lseek (fd, offset, whence);
#endif
}
#ifdef WIN32
int
traverse_directory_win32 (wchar_t *path_w,
DirentCallback callback,
void *user_data)
{
WIN32_FIND_DATAW fdata;
HANDLE handle;
wchar_t *pattern;
char *path;
int path_len_w;
DWORD error;
gboolean stop;
int ret = 0;
path = g_utf16_to_utf8 (path_w, -1, NULL, NULL, NULL);
path_len_w = wcslen(path_w);
pattern = g_new0 (wchar_t, (path_len_w + 3));
wcscpy (pattern, path_w);
wcscat (pattern, L"\\*");
handle = FindFirstFileW (pattern, &fdata);
if (handle == INVALID_HANDLE_VALUE) {
g_warning ("FindFirstFile failed %s: %lu.\n",
path, GetLastError());
ret = -1;
goto out;
}
do {
if (wcscmp (fdata.cFileName, L".") == 0 ||
wcscmp (fdata.cFileName, L"..") == 0)
continue;
++ret;
stop = FALSE;
if (callback (path_w, &fdata, user_data, &stop) < 0) {
ret = -1;
FindClose (handle);
goto out;
}
if (stop) {
FindClose (handle);
goto out;
}
} while (FindNextFileW (handle, &fdata) != 0);
error = GetLastError();
if (error != ERROR_NO_MORE_FILES) {
g_warning ("FindNextFile failed %s: %lu.\n",
path, error);
ret = -1;
}
FindClose (handle);
out:
g_free (path);
g_free (pattern);
return ret;
}
#endif
/* Read exactly n bytes from fd unless EOF intervenes; EINTR is retried.
 * Returns the number of bytes read (short only at EOF), or -1 on error. */
ssize_t
readn (int fd, void *buf, size_t n)
{
    char *dst = buf;
    size_t remaining = n;

    while (remaining > 0) {
        ssize_t r = read (fd, dst, remaining);

        if (r < 0) {
            if (errno != EINTR)
                return -1;
            continue;           /* interrupted: retry */
        }
        if (r == 0)
            break;              /* EOF */

        dst += r;
        remaining -= r;
    }
    return (ssize_t)(n - remaining);
}

/* Write exactly n bytes to fd; EINTR is retried.
 * Returns n on success, -1 on error. */
ssize_t
writen (int fd, const void *buf, size_t n)
{
    const char *src = buf;
    size_t remaining = n;

    while (remaining > 0) {
        ssize_t w = write (fd, src, remaining);

        if (w <= 0) {
            if (w < 0 && errno == EINTR)
                continue;       /* interrupted: retry */
            return -1;
        }

        src += w;
        remaining -= w;
    }
    return n;
}
/* Socket analogue of readn(): read exactly n bytes unless the peer
 * closes; EINTR is retried. Returns bytes received or -1 on error. */
ssize_t
recvn (evutil_socket_t fd, void *buf, size_t n)
{
    char *dst = buf;
    size_t remaining = n;

    while (remaining > 0) {
        ssize_t r;

#ifndef WIN32
        r = read (fd, dst, remaining);
#else
        r = recv (fd, dst, remaining, 0);
#endif
        if (r < 0) {
            if (errno != EINTR)
                return -1;
            continue;           /* interrupted: retry */
        }
        if (r == 0)
            break;              /* connection closed */

        dst += r;
        remaining -= r;
    }
    return (ssize_t)(n - remaining);
}

/* Socket analogue of writen(): send exactly n bytes; EINTR is retried.
 * Returns n on success, -1 on error. */
ssize_t
sendn (evutil_socket_t fd, const void *buf, size_t n)
{
    const char *src = buf;
    size_t remaining = n;

    while (remaining > 0) {
        ssize_t w;

#ifndef WIN32
        w = write (fd, src, remaining);
#else
        w = send (fd, src, remaining, 0);
#endif
        if (w <= 0) {
            if (w < 0 && errno == EINTR)
                continue;       /* interrupted: retry */
            return -1;
        }

        src += w;
        remaining -= w;
    }
    return n;
}
/* Copy everything readable from ifd to ofd in 8KB chunks.
 * Ownership: ifd is closed on success and on read error; ofd is closed
 * only on write error (otherwise the caller keeps it).
 * Returns 0 on success, -1 on failure.
 * NOTE(review): on the write-error path ifd is NOT closed (fd leak) --
 * verify whether that is intentional. */
int copy_fd (int ifd, int ofd)
{
    while (1) {
        char buffer[8192];
        ssize_t len = readn (ifd, buffer, sizeof(buffer));
        if (!len)
            break;
        if (len < 0) {
            close (ifd);
            return -1;
        }
        if (writen (ofd, buffer, len) < 0) {
            close (ofd);
            return -1;
        }
    }
    close(ifd);
    return 0;
}
/* Copy src to dst, creating dst with `mode`. Because dst is opened with
 * O_EXCL, an already-existing dst is treated as success (returns 0).
 * Returns 0 on success; on failure returns -1, except when opening src
 * fails, where the (negative) fd value is returned.
 * NOTE(review): if copy_fd() hits a write error it already closed fdo,
 * so the close(fdo) below double-closes in that path -- verify. */
int copy_file (const char *dst, const char *src, int mode)
{
    int fdi, fdo, status;

    if ((fdi = g_open (src, O_RDONLY | O_BINARY, 0)) < 0)
        return fdi;

    fdo = g_open (dst, O_WRONLY | O_CREAT | O_EXCL | O_BINARY, mode);
    if (fdo < 0 && errno == EEXIST) {
        close (fdi);
        return 0;
    } else if (fdo < 0){
        close (fdi);
        return -1;
    }

    status = copy_fd (fdi, fdo);
    if (close (fdo) != 0)
        return -1;

    return status;
}
/* Expand a user path into a normalized absolute path:
 * - "~" / "~user" prefixes are resolved to home directories;
 * - relative paths are resolved against the current working directory
 *   (POSIX branch only);
 * - "." and ".." components are collapsed (POSIX branch only);
 * - trailing separators are stripped on Windows.
 * Returns a strdup()'d string (free with free()), or NULL when the input
 * is empty/invalid or the result would exceed SEAF_PATH_MAX. */
char*
ccnet_expand_path (const char *src)
{
    int total_len = 0;
#ifdef WIN32
    char new_path[SEAF_PATH_MAX + 1];
    char *p = new_path;
    const char *q = src;

    memset(new_path, 0, sizeof(new_path));
    if (*src == '~') {
        const char *home = g_get_home_dir();
        total_len += strlen(home);
        if (total_len > SEAF_PATH_MAX) {
            return NULL;
        }
        memcpy(new_path, home, strlen(home));
        p += strlen(new_path);
        q++;
    }
    total_len += strlen(q);
    if (total_len > SEAF_PATH_MAX) {
        return NULL;
    }
    memcpy(p, q, strlen(q));

    /* delete the character '\' or '/' at the end of the path
     * because the function stat failed to deal with directory names
     * with '\' or '/' in the end.
     * NOTE(review): for an empty or all-separator result this walks the
     * pointer past the start of the buffer -- verify inputs. */
    p = new_path + strlen(new_path) - 1;
    while(*p == '\\' || *p == '/') *p-- = '\0';

    return strdup (new_path);
#else
    const char *next_in, *ntoken;
    char new_path[SEAF_PATH_MAX + 1];
    char *next_out;
    int len;

    /* special cases */
    if (!src || *src == '\0')
        return NULL;
    if (strlen(src) > SEAF_PATH_MAX)
        return NULL;

    next_in = src;
    next_out = new_path;
    *next_out = '\0';

    if (*src == '~') {
        /* handle src starting with '~' or '~<user>' like '~plt' */
        struct passwd *pw = NULL;

        /* skip to the end of the '~' or '~<user>' component */
        for ( ; *next_in != '/' && *next_in != '\0'; next_in++) ;

        len = next_in - src;
        if (len == 1) {
            pw = getpwuid (geteuid());
        } else {
            /* copy '~<user>' to new_path to extract the user name */
            if (len > SEAF_PATH_MAX) {
                return NULL;
            }
            memcpy (new_path, src, len);
            new_path[len] = '\0';
            pw = getpwnam (new_path + 1);
        }
        if (pw == NULL)
            return NULL;

        len = strlen (pw->pw_dir);
        total_len += len;
        if (total_len > SEAF_PATH_MAX) {
            return NULL;
        }
        memcpy (new_path, pw->pw_dir, len);
        next_out = new_path + len;
        *next_out = '\0';

        if (*next_in == '\0')
            return strdup (new_path);
    } else if (*src != '/') {
        /* relative path: start from the current working directory */
        getcwd (new_path, SEAF_PATH_MAX);
        for ( ; *next_out; next_out++) ; /* to '\0' */
    }

    /* copy the remaining components, collapsing '.' and '..' */
    while (*next_in != '\0') {
        /* move ntoken to the next not '/' char */
        for (ntoken = next_in; *ntoken == '/'; ntoken++) ;

        for (next_in = ntoken; *next_in != '/'
                 && *next_in != '\0'; next_in++) ;

        len = next_in - ntoken;
        if (len == 0) {
            /* the path ends with '/', keep it */
            *next_out++ = '/';
            *next_out = '\0';
            break;
        }

        if (len == 2 && ntoken[0] == '.' && ntoken[1] == '.')
        {
            /* '..': drop the last output component */
            for (; next_out > new_path && *next_out != '/'; next_out--)
                ;
            *next_out = '\0';
        } else if (ntoken[0] != '.' || len != 1) {
            /* not '.': append this component */
            *next_out++ = '/';
            total_len += len;
            if (total_len > SEAF_PATH_MAX) {
                return NULL;
            }
            memcpy (next_out, ntoken, len);
            next_out += len;
            *next_out = '\0';
        }
    }

    /* the final special case: everything collapsed away -> root */
    if (new_path[0] == '\0') {
        new_path[0] = '/';
        new_path[1] = '\0';
    }
    return strdup (new_path);
#endif
}
/* Compute the SHA-1 digest of msg into sha1 (20 bytes).
 * If len < 0, msg is treated as a NUL-terminated string.
 * Always returns 0. Uses the low-level OpenSSL SHA1_* API
 * (deprecated in OpenSSL 3.x). */
int
calculate_sha1 (unsigned char *sha1, const char *msg, int len)
{
    SHA_CTX c;

    if (len < 0)
        len = strlen(msg);

    SHA1_Init(&c);
    SHA1_Update(&c, msg, len);
    SHA1_Final(sha1, &c);
    return 0;
}
/* Hash-table hash over a 20-byte SHA-1 digest (h = h*31 + byte). */
uint32_t
ccnet_sha1_hash (const void *v)
{
    const unsigned char *bytes = v;
    uint32_t h = 0;
    int i;

    for (i = 0; i < 20; i++)
        h = h * 31 + bytes[i];   /* same as (h << 5) - h + bytes[i] */
    return h;
}

/* Hash-table equality for 20-byte SHA-1 digests: 1 if equal, 0 if not. */
int
ccnet_sha1_equal (const void *v1,
                  const void *v2)
{
    return memcmp (v1, v2, 20) == 0;
}
#ifndef WIN32

/* Return a newly allocated lowercase UUID string (36 chars + NUL),
 * generated with libuuid. Free with g_free(). */
char* gen_uuid ()
{
    char *uuid_str = g_malloc (37);
    uuid_t uuid;

    uuid_generate (uuid);
    uuid_unparse_lower (uuid, uuid_str);

    return uuid_str;
}

/* Write a lowercase UUID string into buf (must hold 37 bytes). */
void gen_uuid_inplace (char *buf)
{
    uuid_t uuid;

    uuid_generate (uuid);
    uuid_unparse_lower (uuid, buf);
}

/* TRUE if uuid_str is non-NULL and parses as a UUID. */
gboolean
is_uuid_valid (const char *uuid_str)
{
    uuid_t uuid;

    if (!uuid_str)
        return FALSE;

    if (uuid_parse (uuid_str, uuid) < 0)
        return FALSE;
    return TRUE;
}

#else

/* Windows variant of gen_uuid(), built on the RPC runtime.
 * UuidToString yields a 36-char string; 37 bytes copy the NUL too. */
char* gen_uuid ()
{
    char *uuid_str = g_malloc (37);
    unsigned char *str = NULL;
    UUID uuid;

    UuidCreate(&uuid);
    UuidToString(&uuid, &str);
    memcpy(uuid_str, str, 37);
    RpcStringFree(&str);
    return uuid_str;
}

/* Windows variant of gen_uuid_inplace(); buf must hold 37 bytes. */
void gen_uuid_inplace (char *buf)
{
    unsigned char *str = NULL;
    UUID uuid;

    UuidCreate(&uuid);
    UuidToString(&uuid, &str);
    memcpy(buf, str, 37);
    RpcStringFree(&str);
}

/* Windows variant of is_uuid_valid(). */
gboolean
is_uuid_valid (const char *uuid_str)
{
    if (!uuid_str)
        return FALSE;

    UUID uuid;
    if (UuidFromString((unsigned char *)uuid_str, &uuid) != RPC_S_OK)
        return FALSE;
    return TRUE;
}

#endif
/* TRUE if obj_id is a 40-character lowercase hex string (a SHA-1 id). */
gboolean
is_object_id_valid (const char *obj_id)
{
    int i;

    if (!obj_id || strlen (obj_id) != 40)
        return FALSE;

    for (i = 0; i < 40; ++i) {
        char c = obj_id[i];
        int is_digit = (c >= '0' && c <= '9');
        int is_lower_hex = (c >= 'a' && c <= 'f');

        if (!is_digit && !is_lower_hex)
            return FALSE;
    }
    return TRUE;
}
/* Join argc strings from argv with `seperator` into a newly allocated
 * string (free with g_free()); returns NULL when argc is 0. */
char* strjoin_n (const char *seperator, int argc, char **argv)
{
    GString *out;
    int idx;

    if (argc == 0)
        return NULL;

    out = g_string_new (argv[0]);
    for (idx = 1; idx < argc; ++idx) {
        g_string_append (out, seperator);
        g_string_append (out, argv[idx]);
    }

    /* g_string_free(.., FALSE) releases the GString but hands back its
     * character buffer. */
    return g_string_free (out, FALSE);
}
/* TRUE if `ip` parses as an IPv4 or IPv6 address. */
gboolean is_ipaddr_valid (const char *ip)
{
    unsigned char scratch[sizeof(struct in6_addr)];

    if (evutil_inet_pton (AF_INET, ip, scratch) == 1 ||
        evutil_inet_pton (AF_INET6, ip, scratch) == 1)
        return TRUE;

    return FALSE;
}
/* Parse newline-separated "key value" pairs from `string`, calling
 * func(data, key, value) for each. The input buffer is modified in place
 * (newlines and the first space become NULs). Parsing stops with a
 * warning at the first malformed (space-less) line. */
void parse_key_value_pairs (char *string, KeyValueFunc func, void *data)
{
    char *line = string, *next, *space;
    char *key, *value;

    while (*line) {
        /* handle empty line */
        if (*line == '\n') {
            ++line;
            continue;
        }

        for (next = line; *next != '\n' && *next; ++next) ;
        *next = '\0';

        for (space = line; space < next && *space != ' '; ++space) ;
        if (*space != ' ') {
            g_warning ("Bad key value format: %s\n", line);
            return;
        }
        *space = '\0';
        key = line;
        value = space + 1;

        func (data, key, value);
        line = next + 1;
    }
}

/* Same as parse_key_value_pairs(), but the callback returns a gboolean
 * and parsing stops early when it returns FALSE. */
void parse_key_value_pairs2 (char *string, KeyValueFunc2 func, void *data)
{
    char *line = string, *next, *space;
    char *key, *value;

    while (*line) {
        /* handle empty line */
        if (*line == '\n') {
            ++line;
            continue;
        }

        for (next = line; *next != '\n' && *next; ++next) ;
        *next = '\0';

        for (space = line; space < next && *space != ' '; ++space) ;
        if (*space != ' ') {
            g_warning ("Bad key value format: %s\n", line);
            return;
        }
        *space = '\0';
        key = line;
        value = space + 1;

        if (func(data, key, value) == FALSE)
            break;

        line = next + 1;
    }
}
/**
* string_list_is_exists:
* @str_list:
* @string: a C string or %NULL
*
* Check whether @string is in @str_list.
*
* returns: %TRUE if @string is in str_list, %FALSE otherwise
*/
gboolean
string_list_is_exists (GList *str_list, const char *string)
{
    GList *node;

    for (node = str_list; node != NULL; node = node->next) {
        if (g_strcmp0 (string, node->data) == 0)
            return TRUE;
    }
    return FALSE;
}
/**
 * string_list_append:
 * @str_list:
 * @string: a C string (can't be %NULL)
 *
 * Append @string to @str_list if it is not already in the list.
 *
 * returns: the new start of the list
 */
GList*
string_list_append (GList *str_list, const char *string)
{
    g_return_val_if_fail (string != NULL, str_list);

    /* Keep the list duplicate-free. */
    if (string_list_is_exists (str_list, string))
        return str_list;

    return g_list_append (str_list, g_strdup (string));
}

/* Like string_list_append(), but keeps the list sorted (g_strcmp0). */
GList *
string_list_append_sorted (GList *str_list, const char *string)
{
    g_return_val_if_fail (string != NULL, str_list);

    /* Keep the list duplicate-free. */
    if (string_list_is_exists (str_list, string))
        return str_list;

    return g_list_insert_sorted_with_data (str_list, g_strdup (string),
                                           (GCompareDataFunc)g_strcmp0, NULL);
}
/* Remove (and free) the first occurrence of `string` from the list;
 * returns the (possibly new) list head. */
GList *
string_list_remove (GList *str_list, const char *string)
{
    GList *node;

    g_return_val_if_fail (string != NULL, str_list);

    for (node = str_list; node != NULL; node = node->next) {
        char *item = node->data;

        if (strcmp (item, string) != 0)
            continue;

        g_free (item);
        return g_list_delete_link (str_list, node);
    }
    return str_list;
}
/* Free a list of g_strdup'ed strings along with the list itself. */
void
string_list_free (GList *str_list)
{
    /* Idiom: g_list_free_full frees each element and then the list,
     * replacing the manual walk-and-free loop. */
    g_list_free_full (str_list, g_free);
}
/* Append the list's strings to `str`, separated by `seperator`.
 * An empty list appends nothing. */
void
string_list_join (GList *str_list, GString *str, const char *seperator)
{
    GList *node = str_list;

    if (node == NULL)
        return;

    g_string_append (str, node->data);

    while ((node = node->next) != NULL) {
        g_string_append (str, seperator);
        g_string_append (str, (char *)node->data);
    }
}
/* Split @list_in_str on @seperator into a newly-allocated list of
 * newly-allocated strings, preserving order.  Returns NULL for NULL
 * input.  Free the result with string_list_free(). */
GList *
string_list_parse (const char *list_in_str, const char *seperator)
{
    GList *result = NULL;
    char **tokens, **tok;

    if (list_in_str == NULL)
        return NULL;

    tokens = g_strsplit (list_in_str, seperator, 0);
    /* Prepend then reverse: O(n) instead of repeated O(n) appends. */
    for (tok = tokens; *tok != NULL; tok++)
        result = g_list_prepend (result, g_strdup (*tok));
    g_strfreev (tokens);

    return g_list_reverse (result);
}
/* Like string_list_parse(), but return the resulting list sorted in
 * g_strcmp0 order. */
GList *
string_list_parse_sorted (const char *list_in_str, const char *seperator)
{
    GList *unsorted = string_list_parse (list_in_str, seperator);

    return g_list_sort (unsorted, (GCompareFunc)g_strcmp0);
}
/* Compare two string lists element by element.  Returns TRUE iff they
 * have the same length and equal strings at every position (so for
 * sorted lists, set equality). */
gboolean
string_list_sorted_is_equal (GList *list1, GList *list2)
{
    GList *a = list1, *b = list2;

    while (a != NULL && b != NULL) {
        if (g_strcmp0 (a->data, b->data) != 0)
            return FALSE;
        a = a->next;
        b = b->next;
    }

    /* Equal only if both lists ended together. */
    return (a == NULL && b == NULL);
}
/* Deep-copy the first @n entries of @orig into a newly-allocated
 * array.  Free with nfree_string_array(). */
char **
ncopy_string_array (char **orig, int n)
{
    char **copy = g_malloc (sizeof(char *) * n);
    int idx;

    for (idx = 0; idx < n; idx++)
        copy[idx] = g_strdup (orig[idx]);

    return copy;
}
/* Free the first @n strings of @array and the array itself. */
void
nfree_string_array (char **array, int n)
{
    int idx;

    for (idx = n - 1; idx >= 0; idx--)
        g_free (array[idx]);

    g_free (array);
}
/* Return the current wall-clock time in microseconds since the Unix
 * epoch, as reported by g_get_real_time(). */
gint64
get_current_time()
{
    return g_get_real_time();
}
#ifdef WIN32
static SOCKET pg_serv_sock = INVALID_SOCKET;
static struct sockaddr_in pg_serv_addr;
/* pgpipe() should only be called in the main loop,
* since it accesses the static global socket.
*/
int
pgpipe (ccnet_pipe_t handles[2])
{
int len = sizeof( pg_serv_addr );
handles[0] = handles[1] = INVALID_SOCKET;
if (pg_serv_sock == INVALID_SOCKET) {
if ((pg_serv_sock = socket(AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET) {
g_warning("pgpipe failed to create socket: %d\n", WSAGetLastError());
return -1;
}
memset(&pg_serv_addr, 0, sizeof(pg_serv_addr));
pg_serv_addr.sin_family = AF_INET;
pg_serv_addr.sin_port = htons(0);
pg_serv_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
if (bind(pg_serv_sock, (SOCKADDR *)&pg_serv_addr, len) == SOCKET_ERROR) {
g_warning("pgpipe failed to bind: %d\n", WSAGetLastError());
closesocket(pg_serv_sock);
pg_serv_sock = INVALID_SOCKET;
return -1;
}
if (listen(pg_serv_sock, SOMAXCONN) == SOCKET_ERROR) {
g_warning("pgpipe failed to listen: %d\n", WSAGetLastError());
closesocket(pg_serv_sock);
pg_serv_sock = INVALID_SOCKET;
return -1;
}
struct sockaddr_in tmp_addr;
int tmp_len = sizeof(tmp_addr);
if (getsockname(pg_serv_sock, (SOCKADDR *)&tmp_addr, &tmp_len) == SOCKET_ERROR) {
g_warning("pgpipe failed to getsockname: %d\n", WSAGetLastError());
closesocket(pg_serv_sock);
pg_serv_sock = INVALID_SOCKET;
return -1;
}
pg_serv_addr.sin_port = tmp_addr.sin_port;
}
if ((handles[1] = socket(PF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET)
{
g_warning("pgpipe failed to create socket 2: %d\n", WSAGetLastError());
closesocket(pg_serv_sock);
pg_serv_sock = INVALID_SOCKET;
return -1;
}
if (connect(handles[1], (SOCKADDR *)&pg_serv_addr, len) == SOCKET_ERROR)
{
g_warning("pgpipe failed to connect socket: %d\n", WSAGetLastError());
closesocket(handles[1]);
handles[1] = INVALID_SOCKET;
closesocket(pg_serv_sock);
pg_serv_sock = INVALID_SOCKET;
return -1;
}
struct sockaddr_in client_addr;
int client_len = sizeof(client_addr);
if ((handles[0] = accept(pg_serv_sock, (SOCKADDR *)&client_addr, &client_len)) == INVALID_SOCKET)
{
g_warning("pgpipe failed to accept socket: %d\n", WSAGetLastError());
closesocket(handles[1]);
handles[1] = INVALID_SOCKET;
closesocket(pg_serv_sock);
pg_serv_sock = INVALID_SOCKET;
return -1;
}
return 0;
}
#endif
/*
The EVP_EncryptXXX and EVP_DecryptXXX series of functions have a
weird choice of returned value.
*/
#define ENC_SUCCESS 1
#define ENC_FAILURE 0
#define DEC_SUCCESS 1
#define DEC_FAILURE 0
#include <openssl/evp.h>
#include <openssl/rand.h>
/* Block size, in bytes. For AES it can only be 16 bytes. */
#define BLK_SIZE 16
#define ENCRYPT_BLK_SIZE BLK_SIZE
/* Encrypt @data_in (@in_len bytes) with AES-128-ECB using a key derived
 * from @code via EVP_BytesToKey (SHA1, 3 iterations, no salt).
 *
 * On success returns 0; *data_out points to a newly-allocated buffer
 * (free with g_free) and *out_len holds the ciphertext length, always a
 * whole number of 16-byte blocks due to PKCS padding.  On failure
 * returns -1 with *data_out NULL and *out_len -1.
 *
 * NOTE(review): ECB mode and the 3-iteration key derivation are weak by
 * modern standards but must be kept for compatibility with data
 * encrypted by existing deployments.
 */
int
ccnet_encrypt (char **data_out,
               int *out_len,
               const char *data_in,
               const int in_len,
               const char *code,
               const int code_len)
{
    *data_out = NULL;
    *out_len = -1;

    /* check validation */
    if ( data_in == NULL || in_len <= 0 ||
         code == NULL || code_len <= 0) {
        g_warning ("Invalid params.\n");
        return -1;
    }

    EVP_CIPHER_CTX *ctx;
    int ret, key_len;
    unsigned char key[16], iv[16];
    int blks;

    /* Generate the derived key. We use AES 128 bits key,
       Electroic-Code-Book cipher mode, and SHA1 as the message digest
       when generating the key. IV is not used in ecb mode,
       actually. */
    key_len = EVP_BytesToKey (EVP_aes_128_ecb(), /* cipher mode */
                              EVP_sha1(),        /* message digest */
                              NULL,              /* salt */
                              (unsigned char*)code, /* passwd */
                              code_len,
                              3,   /* iteration times */
                              key, /* the derived key */
                              iv); /* IV, initial vector */

    /* The key should be 16 bytes long for our 128 bit key. */
    if (key_len != 16) {
        g_warning ("failed to init EVP_CIPHER_CTX.\n");
        return -1;
    }

    /* Prepare CTX for encryption. */
    ctx = EVP_CIPHER_CTX_new ();
    ret = EVP_EncryptInit_ex (ctx,
                              EVP_aes_128_ecb(), /* cipher mode */
                              NULL, /* engine, NULL for default */
                              key,  /* derived key */
                              iv);  /* initial vector */

    if (ret == ENC_FAILURE){
        EVP_CIPHER_CTX_free (ctx);
        return -1;
    }

    /* Allocating output buffer. */
    /*
      For EVP symmetric encryption, padding is always used __even if__
      data size is a multiple of block size, in which case the padding
      length is the block size. so we have the following:
     */
    blks = (in_len / BLK_SIZE) + 1;

    *data_out = (char *)g_malloc (blks * BLK_SIZE);
    if (*data_out == NULL) {
        g_warning ("failed to allocate the output buffer.\n");
        goto enc_error;
    }

    int update_len, final_len;

    /* Do the encryption. */
    ret = EVP_EncryptUpdate (ctx,
                             (unsigned char*)*data_out,
                             &update_len,
                             (unsigned char*)data_in,
                             in_len);
    if (ret == ENC_FAILURE)
        goto enc_error;

    /* Finish the possible partial block. */
    ret = EVP_EncryptFinal_ex (ctx,
                               (unsigned char*)*data_out + update_len,
                               &final_len);

    *out_len = update_len + final_len;

    /* out_len should be equal to the allocated buffer size. */
    if (ret == ENC_FAILURE || *out_len != (blks * BLK_SIZE))
        goto enc_error;

    EVP_CIPHER_CTX_free (ctx);
    return 0;

enc_error:
    /* Unified cleanup: release the CTX, reset the out-params and free
     * any partially-filled output buffer. */
    EVP_CIPHER_CTX_free (ctx);

    *out_len = -1;
    if (*data_out != NULL)
        g_free (*data_out);
    *data_out = NULL;

    return -1;
}
/* Decrypt @data_in (@in_len bytes, a whole number of 16-byte blocks)
 * with AES-128-ECB using a key derived from @code exactly as in
 * ccnet_encrypt().
 *
 * On success returns 0; *data_out points to a newly-allocated buffer
 * (free with g_free) and *out_len holds the plaintext length.  On
 * failure returns -1 with *data_out NULL and *out_len -1.
 */
int
ccnet_decrypt (char **data_out,
               int *out_len,
               const char *data_in,
               const int in_len,
               const char *code,
               const int code_len)
{
    *data_out = NULL;
    *out_len = -1;

    /* Check validation. Because padding is always used, in_len must
     * be a multiple of BLK_SIZE */
    if ( data_in == NULL || in_len <= 0 || in_len % BLK_SIZE != 0 ||
         code == NULL || code_len <= 0) {
        g_warning ("Invalid param(s).\n");
        return -1;
    }

    EVP_CIPHER_CTX *ctx;
    int ret, key_len;
    unsigned char key[16], iv[16];

    /* Generate the derived key. We use AES 128 bits key,
       Electroic-Code-Book cipher mode, and SHA1 as the message digest
       when generating the key. IV is not used in ecb mode,
       actually. */
    key_len = EVP_BytesToKey (EVP_aes_128_ecb(), /* cipher mode */
                              EVP_sha1(),        /* message digest */
                              NULL,              /* salt */
                              (unsigned char*)code, /* passwd */
                              code_len,
                              3,   /* iteration times */
                              key, /* the derived key */
                              iv); /* IV, initial vector */

    /* The key should be 16 bytes long for our 128 bit key. */
    if (key_len != 16) {
        g_warning ("failed to init EVP_CIPHER_CTX.\n");
        return -1;
    }

    /* Prepare CTX for decryption. */
    ctx = EVP_CIPHER_CTX_new ();
    ret = EVP_DecryptInit_ex (ctx,
                              EVP_aes_128_ecb(), /* cipher mode */
                              NULL, /* engine, NULL for default */
                              key,  /* derived key */
                              iv);  /* initial vector */

    if (ret == DEC_FAILURE) {
        /* BUGFIX: the CTX allocated above was previously leaked on
         * this path (plain `return -1`). */
        EVP_CIPHER_CTX_free (ctx);
        return -1;
    }

    /* Allocating output buffer. The plaintext is never longer than the
     * ciphertext. */
    *data_out = (char *)g_malloc (in_len);
    if (*data_out == NULL) {
        g_warning ("failed to allocate the output buffer.\n");
        goto dec_error;
    }

    int update_len, final_len;

    /* Do the decryption. */
    ret = EVP_DecryptUpdate (ctx,
                             (unsigned char*)*data_out,
                             &update_len,
                             (unsigned char*)data_in,
                             in_len);
    if (ret == DEC_FAILURE)
        goto dec_error;

    /* Finish the possible partial block. */
    ret = EVP_DecryptFinal_ex (ctx,
                               (unsigned char*)*data_out + update_len,
                               &final_len);

    *out_len = update_len + final_len;

    /* out_len should be smaller than in_len. */
    if (ret == DEC_FAILURE || *out_len > in_len)
        goto dec_error;

    EVP_CIPHER_CTX_free (ctx);
    return 0;

dec_error:
    /* Unified cleanup: release the CTX, reset out-params, free any
     * partially-filled output buffer. */
    EVP_CIPHER_CTX_free (ctx);

    *out_len = -1;
    if (*data_out != NULL)
        g_free (*data_out);
    *data_out = NULL;

    return -1;
}
/* convert locale specific input to utf8 encoded string */
char *ccnet_locale_to_utf8 (const gchar *src)
{
if (!src)
return NULL;
gsize bytes_read = 0;
gsize bytes_written = 0;
GError *error = NULL;
gchar *dst = NULL;
dst = g_locale_to_utf8
(src, /* locale specific string */
strlen(src), /* len of src */
&bytes_read, /* length processed */
&bytes_written, /* output length */
&error);
if (error) {
return NULL;
}
return dst;
}
/* convert utf8 input to locale specific string */
char *ccnet_locale_from_utf8 (const gchar *src)
{
if (!src)
return NULL;
gsize bytes_read = 0;
gsize bytes_written = 0;
GError *error = NULL;
gchar *dst = NULL;
dst = g_locale_from_utf8
(src, /* locale specific string */
strlen(src), /* len of src */
&bytes_read, /* length processed */
&bytes_written, /* output length */
&error);
if (error) {
return NULL;
}
return dst;
}
#ifdef WIN32
/* Return an open handle (PROCESS_ALL_ACCESS) to the first process whose
 * executable base name matches @process_name_in (".exe" appended if
 * missing), or NULL if none is found.  Caller must CloseHandle(). */
static HANDLE
get_process_handle (const char *process_name_in)
{
    char name[256];

    /* Normalize the target name to "<name>.exe". */
    if (strstr(process_name_in, ".exe")) {
        snprintf (name, sizeof(name), "%s", process_name_in);
    } else {
        snprintf (name, sizeof(name), "%s.exe", process_name_in);
    }

    DWORD aProcesses[1024], cbNeeded, cProcesses;
    if (!EnumProcesses(aProcesses, sizeof(aProcesses), &cbNeeded))
        return NULL;

    /* Calculate how many process identifiers were returned. */
    cProcesses = cbNeeded / sizeof(DWORD);

    HANDLE hProcess;
    HMODULE hMod;
    char process_name[SEAF_PATH_MAX];
    unsigned int i;

    for (i = 0; i < cProcesses; i++) {
        if (aProcesses[i] == 0)
            continue;
        hProcess = OpenProcess (PROCESS_ALL_ACCESS, FALSE, aProcesses[i]);
        if (!hProcess)
            continue;

        /* BUGFIX: compare only when the module name was actually
         * retrieved.  Previously process_name was read even when
         * EnumProcessModules failed, using an uninitialized (or stale
         * from a previous iteration) buffer. */
        process_name[0] = '\0';
        if (EnumProcessModules(hProcess, &hMod, sizeof(hMod), &cbNeeded)) {
            GetModuleBaseName(hProcess, hMod, process_name,
                              sizeof(process_name)/sizeof(char));
            if (strcasecmp(process_name, name) == 0)
                return hProcess;
        }
        CloseHandle(hProcess);
    }

    /* Not found */
    return NULL;
}
/* Count running processes with a module whose base name matches
 * @process_name_in (".exe" appended if absent).  Returns 0 when
 * process enumeration fails.
 *
 * NOTE(review): every module of every process is compared, so a
 * process that merely loads a DLL with a matching base name would also
 * be counted — confirm this is the intended semantics. */
int count_process (const char *process_name_in)
{
    char name[SEAF_PATH_MAX];
    char process_name[SEAF_PATH_MAX];
    DWORD aProcesses[1024], cbNeeded, cProcesses;
    HANDLE hProcess;
    HMODULE hMods[1024];
    int count = 0;
    int i, j;

    /* Normalize the target name to "<name>.exe". */
    if (strstr(process_name_in, ".exe")) {
        snprintf (name, sizeof(name), "%s", process_name_in);
    } else {
        snprintf (name, sizeof(name), "%s.exe", process_name_in);
    }

    if (!EnumProcesses(aProcesses, sizeof(aProcesses), &cbNeeded)) {
        return 0;
    }

    /* Calculate how many process identifiers were returned. */
    cProcesses = cbNeeded / sizeof(DWORD);

    for (i = 0; i < cProcesses; i++) {
        if(aProcesses[i] == 0)
            continue;
        hProcess = OpenProcess (PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, FALSE, aProcesses[i]);
        if (!hProcess) {
            continue;
        }
        /* Compare the base name of each module of this process. */
        if (EnumProcessModules(hProcess, hMods, sizeof(hMods), &cbNeeded)) {
            for (j = 0; j < cbNeeded / sizeof(HMODULE); j++) {
                if (GetModuleBaseName(hProcess, hMods[j], process_name,
                                      sizeof(process_name))) {
                    if (strcasecmp(process_name, name) == 0)
                        count++;
                }
            }
        }
        CloseHandle(hProcess);
    }

    return count;
}
gboolean
process_is_running (const char *process_name)
{
HANDLE proc_handle = get_process_handle(process_name);
if (proc_handle) {
CloseHandle(proc_handle);
return TRUE;
} else {
return FALSE;
}
}
int
win32_kill_process (const char *process_name)
{
HANDLE proc_handle = get_process_handle(process_name);
if (proc_handle) {
TerminateProcess(proc_handle, 0);
CloseHandle(proc_handle);
return 0;
} else {
return -1;
}
}
/* Spawn @cmdline_in (UTF-8) as a windowless child process, optionally
 * in @working_directory_in.  The child inherits our standard handles.
 * Returns 0 on success, -1 on failure; does not wait for the child. */
int
win32_spawn_process (char *cmdline_in, char *working_directory_in)
{
    if (!cmdline_in)
        return -1;

    wchar_t *cmdline_w = NULL;
    wchar_t *working_directory_w = NULL;

    cmdline_w = wchar_from_utf8 (cmdline_in);
    /* BUGFIX: previously tested cmdline_in (always non-NULL here)
     * instead of the conversion result cmdline_w. */
    if (!cmdline_w) {
        g_warning ("failed to convert cmdline_in");
        return -1;
    }

    if (working_directory_in) {
        working_directory_w = wchar_from_utf8 (working_directory_in);
        if (!working_directory_w) {
            g_warning ("failed to convert working_directory_in");
            /* BUGFIX: cmdline_w was previously leaked on this path. */
            free (cmdline_w);
            return -1;
        }
    }

    STARTUPINFOW si;
    PROCESS_INFORMATION pi;
    unsigned flags;
    BOOL success;

    /* we want to execute seafile without creating a console window */
    flags = CREATE_NO_WINDOW;

    memset(&si, 0, sizeof(si));
    si.cb = sizeof(si);
    si.dwFlags = STARTF_USESTDHANDLES | STARTF_FORCEOFFFEEDBACK;
    si.hStdInput = (HANDLE) _get_osfhandle(0);
    si.hStdOutput = (HANDLE) _get_osfhandle(1);
    si.hStdError = (HANDLE) _get_osfhandle(2);

    memset(&pi, 0, sizeof(pi));

    success = CreateProcessW (NULL, cmdline_w, NULL, NULL, TRUE, flags,
                              NULL, working_directory_w, &si, &pi);
    free (cmdline_w);
    if (working_directory_w) free (working_directory_w);
    if (!success) {
        g_warning ("failed to fork_process: GLE=%lu\n", GetLastError());
        return -1;
    }

    /* close the handle of thread so that the process object can be freed by
     * system
     */
    CloseHandle(pi.hThread);
    CloseHandle(pi.hProcess);

    return 0;
}
/* Convert a wide-char string to a newly-allocated UTF-8 string.
 * Returns NULL on failure or NULL input; caller frees with g_free. */
char *
wchar_to_utf8 (const wchar_t *wch)
{
    if (wch == NULL) {
        return NULL;
    }

    char *utf8 = NULL;
    int bufsize, len;

    /* First pass with a zero-sized destination only measures the
     * buffer size needed (including the trailing '\0'). */
    bufsize = WideCharToMultiByte
        (CP_UTF8,            /* multibyte code page */
         0,                  /* flags */
         wch,                /* src */
         -1,                 /* src len, -1 for all includes \0 */
         utf8,               /* dst */
         0,                  /* dst buf len */
         NULL,               /* default char */
         NULL);              /* BOOL flag indicates default char is used */

    if (bufsize <= 0) {
        g_warning ("failed to convert a string from wchar to utf8 0");
        return NULL;
    }

    utf8 = g_malloc(bufsize);
    /* Second pass performs the actual conversion. */
    len = WideCharToMultiByte
        (CP_UTF8,            /* multibyte code page */
         0,                  /* flags */
         wch,                /* src */
         -1,                 /* src len, -1 for all includes \0 */
         utf8,               /* dst */
         bufsize,            /* dst buf len */
         NULL,               /* default char */
         NULL);              /* BOOL flag indicates default char is used */

    /* The conversion must fill exactly the size reported earlier. */
    if (len != bufsize) {
        g_free (utf8);
        g_warning ("failed to convert a string from wchar to utf8");
        return NULL;
    }

    return utf8;
}
/* Convert a UTF-8 string to a newly-allocated wide-char string.
 * Returns NULL on failure or NULL input; caller frees the result. */
wchar_t *
wchar_from_utf8 (const char *utf8)
{
    if (utf8 == NULL) {
        return NULL;
    }

    wchar_t *wch = NULL;
    int bufsize, len;

    /* First pass with a zero-sized destination only measures the
     * required size in wide chars (including the trailing L'\0'). */
    bufsize = MultiByteToWideChar
        (CP_UTF8,            /* multibyte code page */
         0,                  /* flags */
         utf8,               /* src */
         -1,                 /* src len, -1 for all includes \0 */
         wch,                /* dst */
         0);                 /* dst buf len */

    if (bufsize <= 0) {
        /* BUGFIX: message previously said "wchar to utf8" although
         * this function converts utf8 -> wchar. */
        g_warning ("failed to convert a string from utf8 to wchar 0");
        return NULL;
    }

    wch = g_malloc (bufsize * sizeof(wchar_t));
    /* Second pass performs the actual conversion. */
    len = MultiByteToWideChar
        (CP_UTF8,            /* multibyte code page */
         0,                  /* flags */
         utf8,               /* src */
         -1,                 /* src len, -1 for all includes \0 */
         wch,                /* dst */
         bufsize);           /* dst buf len */

    /* The conversion must fill exactly the size reported earlier. */
    if (len != bufsize) {
        g_free (wch);
        g_warning ("failed to convert a string from utf8 to wchar");
        return NULL;
    }

    return wch;
}
#endif /* ifdef WIN32 */
#ifdef __linux__
/* read the link of /proc/123/exe and compare with `process_name' */
/* Check whether the /proc entry @dir names a process whose executable
 * base name equals @process_name.  Returns the pid on a match, -1
 * otherwise (including any error). */
static int
find_process_in_dirent(struct dirent *dir, const char *process_name)
{
    char path[512];
    /* first construct a path like /proc/123/exe */
    if (snprintf (path, sizeof(path), "/proc/%s/exe", dir->d_name) < 0) {
        return -1;
    }

    char buf[SEAF_PATH_MAX];
    /* get the full path of exe; reserve one byte for the terminator.
     * BUGFIX: passing the full buffer size meant buf[l] below could
     * write one byte past the end of buf when the link target exactly
     * filled the buffer. */
    ssize_t l = readlink(path, buf, sizeof(buf) - 1);
    if (l < 0)
        return -1;
    buf[l] = '\0';

    /* get the base name of exe */
    char *base = g_path_get_basename(buf);
    int ret = strcmp(base, process_name);
    g_free(base);
    if (ret == 0)
        return atoi(dir->d_name);
    else
        return -1;
}
/* read the /proc fs to determine whether some process is running */
gboolean process_is_running (const char *process_name)
{
    gboolean found = FALSE;
    struct dirent *entry = NULL;

    DIR *proc_dir = opendir("/proc");
    if (!proc_dir) {
        fprintf (stderr, "failed to open /proc/ dir\n");
        return FALSE;
    }

    while (!found && (entry = readdir(proc_dir)) != NULL) {
        char c = entry->d_name[0];
        /* only /proc/[1-9][0-9]* directories describe processes */
        if (c < '1' || c > '9')
            continue;
        if (find_process_in_dirent(entry, process_name) > 0)
            found = TRUE;
    }

    closedir(proc_dir);
    return found;
}
/* Count the running processes whose executable base name equals
 * @process_name, by scanning /proc.  Returns 0 if /proc cannot be
 * opened. */
int count_process(const char *process_name)
{
    int count = 0;
    DIR *proc_dir = opendir("/proc");
    if (!proc_dir) {
        g_warning ("failed to open /proc/ :%s\n", strerror(errno));
        /* BUGFIX: previously returned FALSE (a gboolean) from this
         * int-returning function; same value, clearer intent. */
        return 0;
    }

    struct dirent *subdir = NULL;
    while ((subdir = readdir(proc_dir))) {

        char first = subdir->d_name[0];
        /* /proc/[1-9][0-9]* */
        if (first > '9' || first < '1')
            continue;
        if (find_process_in_dirent(subdir, process_name) > 0) {
            count++;
        }
    }
    closedir (proc_dir);
    return count;
}
#endif
#ifdef __APPLE__
/* macOS stub: process detection is not implemented, so a process is
 * always reported as not running. */
gboolean process_is_running (const char *process_name)
{
    //TODO
    return FALSE;
}
#endif
/* Extract the type prefix of an object id of the form "type/rest".
 * Returns a newly-allocated copy of the part before the first '/',
 * or NULL if the id contains no '/'.  Free with g_free. */
char*
ccnet_object_type_from_id (const char *object_id)
{
    const char *slash = strchr (object_id, '/');

    if (slash == NULL)
        return NULL;

    return g_strndup (object_id, slash - object_id);
}
#ifdef WIN32
/**
 * In Win32 we need to use _stat64 for files larger than 2GB. _stat64 needs
 * the `path' argument in gbk encoding.
 */
#define STAT_STRUCT struct __stat64
#define STAT_FUNC win_stat64_utf8

/* stat() replacement taking a UTF-8 path: converts it to wide chars
 * and calls _wstat64 so large files and non-ASCII paths work. */
static inline int
win_stat64_utf8 (char *path_utf8, STAT_STRUCT *sb)
{
    wchar_t *path_w = wchar_from_utf8 (path_utf8);
    int result = _wstat64 (path_w, sb);
    free (path_w);
    return result;
}

#else
/* On other platforms plain stat() is used. */
#define STAT_STRUCT struct stat
#define STAT_FUNC stat
#endif
/* Recursively sum the sizes of all regular files under @path.
 * Returns the total in bytes, or -1 with *calc_error set on failure.
 * Entries that are neither directories nor regular files (e.g. fifos,
 * sockets) are skipped. */
static gint64
calc_recursively (const char *path, GError **calc_error)
{
    gint64 sum = 0;

    GError *error = NULL;
    GDir *folder = g_dir_open(path, 0, &error);
    if (!folder) {
        g_set_error (calc_error, CCNET_DOMAIN, 0,
                     "g_open() dir %s failed:%s\n", path, error->message);
        /* BUGFIX: the local GError was previously leaked. */
        g_clear_error (&error);
        return -1;
    }

    const char *name = NULL;
    while ((name = g_dir_read_name(folder)) != NULL) {
        STAT_STRUCT sb;
        char *full_path= g_build_filename (path, name, NULL);
        if (STAT_FUNC(full_path, &sb) < 0) {
            g_set_error (calc_error, CCNET_DOMAIN, 0, "failed to stat on %s: %s\n",
                         full_path, strerror(errno));
            g_free(full_path);
            g_dir_close(folder);
            return -1;
        }

        if (S_ISDIR(sb.st_mode)) {
            gint64 size = calc_recursively(full_path, calc_error);
            if (size < 0) {
                g_free (full_path);
                g_dir_close (folder);
                return -1;
            }
            sum += size;
        } else if (S_ISREG(sb.st_mode)) {
            sum += sb.st_size;
        }
        /* BUGFIX: full_path was previously freed only in the dir and
         * regular-file branches, leaking for any other entry type. */
        g_free(full_path);
    }

    g_dir_close (folder);
    return sum;
}
/* Return the total size in bytes of all regular files under @path
 * (recursively), or -1 with *error set on failure. */
gint64
ccnet_calc_directory_size (const char *path, GError **error)
{
    return calc_recursively (path, error);
}
#ifdef WIN32
/*
* strtok_r code directly from glibc.git /string/strtok_r.c since windows
* doesn't have it.
*/
/* Re-entrant tokenizer (glibc-compatible).  Pass the string on the
 * first call and NULL afterwards; *save_ptr carries the position
 * between calls.  Returns the next token (NUL-terminated in place) or
 * NULL when the string is exhausted. */
char *
strtok_r(char *s, const char *delim, char **save_ptr)
{
    char *start;

    if (s == NULL)
        s = *save_ptr;

    /* Skip any run of leading delimiters. */
    s += strspn(s, delim);
    if (*s == '\0') {
        *save_ptr = s;
        return NULL;
    }

    /* The token runs until the next delimiter (or the end). */
    start = s;
    s = strpbrk(start, delim);
    if (s != NULL) {
        /* Terminate the token and resume just past it next time. */
        *s = '\0';
        *save_ptr = s + 1;
    } else {
        /* Last token: park *save_ptr on the terminating NUL. */
        *save_ptr = strchr(start, '\0');
    }

    return start;
}
#endif
/* JSON related utils. For compatibility with json-glib. */

/* Return the string value of member @key of @object, or NULL when the
 * member is missing (json_string_value also yields NULL for
 * non-string members).  The returned string is owned by the object. */
const char *
json_object_get_string_member (json_t *object, const char *key)
{
    json_t *member = json_object_get (object, key);

    return member ? json_string_value (member) : NULL;
}
/* Report whether @object has a member named @key. */
gboolean
json_object_has_member (json_t *object, const char *key)
{
    if (json_object_get (object, key) != NULL)
        return TRUE;
    return FALSE;
}
/* Return the integer value of member @key of @object.
 * json_integer_value() yields 0 for a NULL or non-integer member, so
 * a missing key reads as 0. */
gint64
json_object_get_int_member (json_t *object, const char *key)
{
    json_t *member = json_object_get (object, key);

    return json_integer_value (member);
}
/* Set member @key of @object to a JSON string holding @value. */
void
json_object_set_string_member (json_t *object, const char *key, const char *value)
{
    json_t *jstr = json_string (value);

    /* json_object_set_new steals the reference to jstr. */
    json_object_set_new (object, key, jstr);
}
/* Set member @key of @object to a JSON integer holding @value. */
void
json_object_set_int_member (json_t *object, const char *key, gint64 value)
{
    json_t *jint = json_integer (value);

    /* json_object_set_new steals the reference to jint. */
    json_object_set_new (object, key, jint);
}
/* Scan @data (@len bytes, not necessarily NUL-terminated) and replace,
 * in place, every byte at which UTF-8 validation fails with '?'.
 * The write pointer p advances in lock-step with the scan pointer s
 * (both move by (e - s) + 1 per iteration), so each '?' lands exactly
 * on the offending byte. */
void
clean_utf8_data (char *data, int len)
{
    const char *s, *e;
    char *p;
    gboolean is_valid;

    s = data;
    p = data;

    while ((s - data) != len) {
        /* On failure, e points at the first invalid byte. */
        is_valid = g_utf8_validate (s, len - (s - data), &e);
        if (is_valid)
            break;

        if (s != e)
            p += (e - s);   /* skip past the bytes that validated */
        *p = '?';           /* overwrite the invalid byte */
        ++p;
        s = e + 1;          /* resume scanning after it */
    }
}
char *
normalize_utf8_path (const char *path)
{
if (!g_utf8_validate (path, -1, NULL))
return NULL;
return g_utf8_normalize (path, -1, G_NORMALIZE_NFC);
}
/* zlib related wrapper functions. */

#define ZLIB_BUF_SIZE 16384

/* Compress @input (@inlen bytes) with zlib deflate at the default
 * level.  On success returns 0, stores a newly-allocated buffer in
 * *output and its length in *outlen (free with g_free).  Returns -1 on
 * empty input or deflate initialization failure. */
int
seaf_compress (guint8 *input, int inlen, guint8 **output, int *outlen)
{
    int ret;
    unsigned have;
    z_stream strm;
    guint8 out[ZLIB_BUF_SIZE];
    GByteArray *barray;

    if (inlen == 0)
        return -1;

    /* allocate deflate state */
    strm.zalloc = Z_NULL;
    strm.zfree = Z_NULL;
    strm.opaque = Z_NULL;
    ret = deflateInit(&strm, Z_DEFAULT_COMPRESSION);
    if (ret != Z_OK) {
        g_warning ("deflateInit failed.\n");
        return -1;
    }

    strm.avail_in = inlen;
    strm.next_in = input;

    barray = g_byte_array_new ();

    /* Z_FINISH with all input supplied: loop until the stream ends,
     * collecting each filled chunk of the fixed-size output buffer. */
    do {
        strm.avail_out = ZLIB_BUF_SIZE;
        strm.next_out = out;
        ret = deflate(&strm, Z_FINISH);    /* no bad return value */
        have = ZLIB_BUF_SIZE - strm.avail_out;
        g_byte_array_append (barray, out, have);
    } while (ret != Z_STREAM_END);

    *outlen = barray->len;
    /* FALSE: keep the data buffer, hand ownership to the caller. */
    *output = g_byte_array_free (barray, FALSE);

    /* clean up and return */
    (void)deflateEnd(&strm);
    return 0;
}
/* Decompress the zlib stream @input (@inlen bytes).  On success
 * returns 0, stores a newly-allocated buffer in *output and its length
 * in *outlen (free with g_free).  Returns -1 on error. */
int
seaf_decompress (guint8 *input, int inlen, guint8 **output, int *outlen)
{
    int ret;
    unsigned have;
    z_stream strm;
    unsigned char out[ZLIB_BUF_SIZE];
    GByteArray *barray;

    if (inlen == 0) {
        g_warning ("Empty input for zlib, invalid.\n");
        return -1;
    }

    /* allocate inflate state */
    strm.zalloc = Z_NULL;
    strm.zfree = Z_NULL;
    strm.opaque = Z_NULL;
    strm.avail_in = 0;
    strm.next_in = Z_NULL;
    ret = inflateInit(&strm);
    if (ret != Z_OK) {
        g_warning ("inflateInit failed.\n");
        return -1;
    }

    strm.avail_in = inlen;
    strm.next_in = input;

    barray = g_byte_array_new ();
    do {
        strm.avail_out = ZLIB_BUF_SIZE;
        strm.next_out = out;
        ret = inflate(&strm, Z_NO_FLUSH);
        /* BUGFIX: Z_NEED_DICT is a positive return, so the old `< 0`
         * test missed it and the loop could spin forever (inflate
         * consumes no input while waiting for a dictionary we never
         * supply).  Per the zlib manual, treat it as a data error. */
        if (ret == Z_NEED_DICT)
            ret = Z_DATA_ERROR;
        if (ret < 0) {
            g_warning ("Failed to inflate.\n");
            goto out;
        }
        have = ZLIB_BUF_SIZE - strm.avail_out;
        g_byte_array_append (barray, out, have);
    } while (ret != Z_STREAM_END);

out:
    /* clean up and return */
    (void)inflateEnd(&strm);
    if (ret == Z_STREAM_END) {
        *outlen = barray->len;
        /* FALSE: keep the data buffer, hand ownership to the caller. */
        *output = g_byte_array_free (barray, FALSE);
        return 0;
    } else {
        g_byte_array_free (barray, TRUE);
        return -1;
    }
}
/* Return a normalized copy of directory path @path: guaranteed to
 * start with '/' and to carry no trailing '/' (except the root "/"
 * itself).  Free the result with g_free. */
char*
format_dir_path (const char *path)
{
    char *normalized;
    int len;

    /* Ensure a leading '/'. */
    if (path[0] == '/')
        normalized = g_strdup (path);
    else
        normalized = g_strconcat ("/", path, NULL);

    /* Strip trailing slashes, but never the root slash. */
    len = strlen (normalized);
    while (len > 1 && normalized[len-1] == '/')
        normalized[--len] = '\0';

    return normalized;
}
gboolean
is_empty_string (const char *str)
{
return !str || strcmp (str, "") == 0;
}
gboolean
is_permission_valid (const char *perm)
{
if (is_empty_string (perm)) {
return FALSE;
}
return strcmp (perm, "r") == 0 || strcmp (perm, "rw") == 0;
}
/* Read string option @key from @group of @key_file.  Returns the value
 * with trailing whitespace removed (free with g_free), or NULL when
 * the key is absent, unreadable or empty. */
char *
seaf_key_file_get_string (GKeyFile *key_file,
                          const char *group,
                          const char *key,
                          GError **error)
{
    char *value = g_key_file_get_string (key_file, group, key, error);

    if (value == NULL)
        return NULL;

    if (value[0] == '\0') {
        /* Treat an empty value the same as a missing one. */
        g_free (value);
        return NULL;
    }

    return g_strchomp (value);
}
/* Read string option @key from @category of @keyf, ignoring errors.
 * Returns the value with trailing whitespace removed (free with
 * g_free), or NULL when the key is absent or its value is empty. */
gchar*
ccnet_key_file_get_string (GKeyFile *keyf,
                           const char *category,
                           const char *key)
{
    gchar *value;

    if (!g_key_file_has_key (keyf, category, key, NULL))
        return NULL;

    value = g_key_file_get_string (keyf, category, key, NULL);
    if (value != NULL && value[0] == '\0') {
        /* Treat an empty value the same as a missing one. */
        g_free (value);
        return NULL;
    }

    return g_strchomp (value);
}
================================================
FILE: lib/utils.h
================================================
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#ifndef CCNET_UTILS_H
#define CCNET_UTILS_H
#ifdef WIN32
#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x500
#endif
#include
#endif
#include
#include
#include
#include
#include
#include
#include
#include
#include